xref: /wlan-driver/qcacld-3.0/core/dp/txrx/ol_txrx.h (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #ifndef _OL_TXRX__H_
21*5113495bSYour Name #define _OL_TXRX__H_
22*5113495bSYour Name 
23*5113495bSYour Name #include <qdf_nbuf.h>           /* qdf_nbuf_t */
24*5113495bSYour Name #include <cdp_txrx_cmn.h>       /* ol_txrx_vdev_t, etc. */
25*5113495bSYour Name #include "cds_sched.h"
26*5113495bSYour Name #include <cdp_txrx_handle.h>
27*5113495bSYour Name #include <ol_txrx_types.h>
28*5113495bSYour Name #include <ol_txrx_internal.h>
29*5113495bSYour Name #include <qdf_hrtimer.h>
30*5113495bSYour Name 
/*
 * Pool of tx descriptors reserved for high-priority traffic,
 * such as ARP/EAPOL etc, only for the forwarding path: the
 * intra-bss forwarding path stops consuming descriptors once
 * fewer than this many remain, so stack-originated frames
 * (see ol_txrx_fwd_desc_thresh_check()) can still be sent.
 */
#define OL_TX_NON_FWD_RESERVE	100
37*5113495bSYour Name 
/**
 * enum ol_txrx_fc_limit_id - Flow control identifier for
 * vdev limits based on band, channel bw and number of spatial streams
 * @TXRX_FC_5GH_80M_2x2: Limit for 5GHz, 80MHz BW, 2x2 NSS
 * @TXRX_FC_5GH_40M_2x2: Limit for 5GHz, 40MHz BW, 2x2 NSS
 * @TXRX_FC_5GH_20M_2x2: Limit for 5GHz, 20MHz BW, 2x2 NSS
 * @TXRX_FC_5GH_80M_1x1: Limit for 5GHz, 80MHz BW, 1x1 NSS
 * @TXRX_FC_5GH_40M_1x1: Limit for 5GHz, 40MHz BW, 1x1 NSS
 * @TXRX_FC_5GH_20M_1x1: Limit for 5GHz, 20MHz BW, 1x1 NSS
 * @TXRX_FC_2GH_40M_2x2: Limit for 2.4GHz, 40MHz BW, 2x2 NSS
 * @TXRX_FC_2GH_20M_2x2: Limit for 2.4GHz, 20MHz BW, 2x2 NSS
 * @TXRX_FC_2GH_40M_1x1: Limit for 2.4GHz, 40MHz BW, 1x1 NSS
 * @TXRX_FC_2GH_20M_1x1: Limit for 2.4GHz, 20MHz BW, 1x1 NSS
 * @TXRX_FC_MAX: Number of limit identifiers (table size)
 */
enum ol_txrx_fc_limit_id {
	TXRX_FC_5GH_80M_2x2,
	TXRX_FC_5GH_40M_2x2,
	TXRX_FC_5GH_20M_2x2,
	TXRX_FC_5GH_80M_1x1,
	TXRX_FC_5GH_40M_1x1,
	TXRX_FC_5GH_20M_1x1,
	TXRX_FC_2GH_40M_2x2,
	TXRX_FC_2GH_20M_2x2,
	TXRX_FC_2GH_40M_1x1,
	TXRX_FC_2GH_20M_1x1,
	TXRX_FC_MAX
};
65*5113495bSYour Name 
/*
 * Peer-id unmap-count thresholds used when receive flow steering (RFS)
 * is enabled vs disabled.
 * NOTE(review): presumably the number of HTT peer-unmap events expected
 * per peer in each mode -- confirm against the peer map/unmap handling.
 */
#define TXRX_RFS_ENABLE_PEER_ID_UNMAP_COUNT    3
#define TXRX_RFS_DISABLE_PEER_ID_UNMAP_COUNT   1

/**
 * ol_txrx_peer_get_ref_by_addr() - look up a peer by MAC address and
 *  take a reference on it
 * @pdev: data physical device handle
 * @peer_addr: MAC address of the peer to find
 * @dbg_id: debug id recording which module holds the reference
 *
 * The caller owns the returned reference and must drop it with
 * ol_txrx_peer_release_ref() using the same @dbg_id.
 *
 * Return: peer handle on success, NULL if no matching peer exists
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
						 u8 *peer_addr,
						 enum peer_debug_id_type
						 dbg_id);

/**
 * ol_txrx_peer_release_ref() - release a peer reference taken earlier
 * @peer: peer whose reference count is to be decremented
 * @dbg_id: debug id used when the reference was acquired
 *
 * Return: remaining reference count -- NOTE(review): confirm exact
 *         semantics against the definition in ol_txrx.c
 */
int  ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
			      enum peer_debug_id_type dbg_id);
76*5113495bSYour Name 
77*5113495bSYour Name /**
78*5113495bSYour Name  * ol_txrx_soc_attach() - initialize the soc
79*5113495bSYour Name  * @scn_handle: Opaque SOC handle from control plane
80*5113495bSYour Name  * @dp_ol_if_ops: Offload Operations
81*5113495bSYour Name  *
82*5113495bSYour Name  * Return: SOC handle on success, NULL on failure
83*5113495bSYour Name  */
84*5113495bSYour Name ol_txrx_soc_handle ol_txrx_soc_attach(void *scn_handle,
85*5113495bSYour Name 				      struct ol_if_ops *dp_ol_if_ops);
86*5113495bSYour Name 
87*5113495bSYour Name /**
88*5113495bSYour Name  * ol_tx_desc_pool_size_hl() - allocate tx descriptor pool size for HL systems
89*5113495bSYour Name  * @ctrl_pdev: the control pdev handle
90*5113495bSYour Name  *
91*5113495bSYour Name  * Return: allocated pool size
92*5113495bSYour Name  */
93*5113495bSYour Name u_int16_t
94*5113495bSYour Name ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev);
95*5113495bSYour Name 
96*5113495bSYour Name #ifndef OL_TX_AVG_FRM_BYTES
97*5113495bSYour Name #define OL_TX_AVG_FRM_BYTES 1000
98*5113495bSYour Name #endif
99*5113495bSYour Name 
100*5113495bSYour Name #ifndef OL_TX_DESC_POOL_SIZE_MIN_HL
101*5113495bSYour Name #define OL_TX_DESC_POOL_SIZE_MIN_HL 500
102*5113495bSYour Name #endif
103*5113495bSYour Name 
104*5113495bSYour Name #ifndef OL_TX_DESC_POOL_SIZE_MAX_HL
105*5113495bSYour Name #define OL_TX_DESC_POOL_SIZE_MAX_HL 5000
106*5113495bSYour Name #endif
107*5113495bSYour Name 
108*5113495bSYour Name #ifndef FW_STATS_DESC_POOL_SIZE
109*5113495bSYour Name #define FW_STATS_DESC_POOL_SIZE 10
110*5113495bSYour Name #endif
111*5113495bSYour Name 
112*5113495bSYour Name #ifdef QCA_HL_NETDEV_FLOW_CONTROL
113*5113495bSYour Name #define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400
114*5113495bSYour Name #define TXRX_HL_TX_FLOW_CTRL_MGMT_RESERVED 100
115*5113495bSYour Name #endif
116*5113495bSYour Name 
117*5113495bSYour Name #define TXRX_HL_TX_DESC_HI_PRIO_RESERVED 20
118*5113495bSYour Name #define TXRX_HL_TX_DESC_QUEUE_RESTART_TH \
119*5113495bSYour Name 		(TXRX_HL_TX_DESC_HI_PRIO_RESERVED + 100)
120*5113495bSYour Name 
/**
 * struct peer_hang_data - per-peer record reported in hang-event data
 * @tlv_header: TLV tag/length header for this record
 * @peer_mac_addr: MAC address of the peer
 * @peer_timeout_bitmask: bitmask of timed-out events for this peer
 *  (NOTE(review): exact bit meanings defined by the hang-event
 *   consumer -- confirm)
 *
 * Packed so the layout matches the on-wire/firmware record format.
 */
struct peer_hang_data {
	uint16_t tlv_header;
	uint8_t peer_mac_addr[QDF_MAC_ADDR_SIZE];
	uint16_t peer_timeout_bitmask;
} qdf_packed;
126*5113495bSYour Name 
#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)

/**
 * ol_txrx_hl_tdls_flag_reset() - reset the TDLS flag for a vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: id of the virtual device
 * @flag: new flag value
 *
 * Return: None
 */
void
ol_txrx_hl_tdls_flag_reset(struct cdp_soc_t *soc_hdl,
			   uint8_t vdev_id, bool flag);
#else

/* Stub: HL TDLS support not compiled in, nothing to reset */
static inline void
ol_txrx_hl_tdls_flag_reset(struct cdp_soc_t *soc_hdl,
			   uint8_t vdev_id, bool flag)
{
}
#endif
140*5113495bSYour Name 
#ifdef WDI_EVENT_ENABLE
/**
 * ol_get_pldev() - get the pktlog device handle for a pdev
 * @soc: datapath soc handle
 * @pdev_id: id of the physical device
 *
 * Return: opaque pktlog device pointer, NULL if not available
 */
void *ol_get_pldev(struct cdp_soc_t *soc, uint8_t pdev_id);
#else
/* Stub: WDI events disabled, no pktlog device exists */
static inline
void *ol_get_pldev(struct cdp_soc_t *soc, uint8_t pdev_id)
{
	return NULL;
}
#endif
150*5113495bSYour Name 
151*5113495bSYour Name #ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
152*5113495bSYour Name ol_txrx_peer_handle
153*5113495bSYour Name ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
154*5113495bSYour Name 				 uint8_t local_peer_id,
155*5113495bSYour Name 				 enum peer_debug_id_type dbg_id);
156*5113495bSYour Name #endif /* QCA_SUPPORT_TXRX_LOCAL_PEER_ID */
157*5113495bSYour Name 
/**
 * ol_txrx_get_pdev_from_pdev_id() - Returns pdev object given the pdev id
 * @soc: core DP soc context
 * @pdev_id: pdev id from pdev object can be retrieved
 *
 * Note: @pdev_id indexes soc->pdev_list without a range check;
 * callers must pass a valid id.
 *
 * Return: Pointer to DP pdev object
 */
static inline struct ol_txrx_pdev_t *
ol_txrx_get_pdev_from_pdev_id(struct ol_txrx_soc_t *soc,
			      uint8_t pdev_id)
{
	return soc->pdev_list[pdev_id];
}
172*5113495bSYour Name 
/**
 * ol_txrx_dump_pkt() - display the contents of a packet buffer
 * @nbuf: buffer which contains data to be displayed
 * @nbuf_paddr: physical address of the buffer
 * @len: defines the size of the data to be displayed
 *
 * Return: None
 */
void
ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len);
182*5113495bSYour Name 
183*5113495bSYour Name /**
184*5113495bSYour Name  * ol_txrx_get_vdev_from_vdev_id() - get vdev from vdev_id
185*5113495bSYour Name  * @vdev_id: vdev_id
186*5113495bSYour Name  *
187*5113495bSYour Name  * Return: vdev handle
188*5113495bSYour Name  *            NULL if not found.
189*5113495bSYour Name  */
190*5113495bSYour Name struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id);
191*5113495bSYour Name 
192*5113495bSYour Name /**
193*5113495bSYour Name  * ol_txrx_get_vdev_from_soc_vdev_id() - get vdev from soc and vdev_id
194*5113495bSYour Name  * @soc: datapath soc handle
195*5113495bSYour Name  * @vdev_id: vdev_id
196*5113495bSYour Name  *
197*5113495bSYour Name  * Return: vdev handle
198*5113495bSYour Name  *            NULL if not found.
199*5113495bSYour Name  */
200*5113495bSYour Name struct ol_txrx_vdev_t *ol_txrx_get_vdev_from_soc_vdev_id(
201*5113495bSYour Name 				struct ol_txrx_soc_t *soc, uint8_t vdev_id);
202*5113495bSYour Name 
203*5113495bSYour Name /**
204*5113495bSYour Name  * ol_txrx_get_mon_vdev_from_pdev() - get monitor mode vdev from pdev
205*5113495bSYour Name  * @soc: datapath soc handle
206*5113495bSYour Name  * @pdev_id: the physical device id the virtual device belongs to
207*5113495bSYour Name  *
208*5113495bSYour Name  * Return: vdev id
209*5113495bSYour Name  *         error if not found.
210*5113495bSYour Name  */
211*5113495bSYour Name uint8_t ol_txrx_get_mon_vdev_from_pdev(struct cdp_soc_t *soc,
212*5113495bSYour Name 				       uint8_t pdev_id);
213*5113495bSYour Name 
214*5113495bSYour Name /**
215*5113495bSYour Name  * ol_txrx_get_vdev_by_peer_addr() - Get vdev handle by peer mac address
216*5113495bSYour Name  * @ppdev - data path device instance
217*5113495bSYour Name  * @peer_addr - peer mac address
218*5113495bSYour Name  *
219*5113495bSYour Name  * Get virtual interface handle by local peer mac address
220*5113495bSYour Name  *
221*5113495bSYour Name  * Return: Virtual interface instance handle
222*5113495bSYour Name  *         NULL in case cannot find
223*5113495bSYour Name  */
224*5113495bSYour Name ol_txrx_vdev_handle
225*5113495bSYour Name ol_txrx_get_vdev_by_peer_addr(struct cdp_pdev *ppdev,
226*5113495bSYour Name 			      struct qdf_mac_addr peer_addr);
227*5113495bSYour Name 
/**
 * ol_txrx_find_peer_by_addr() - find the peer matching a MAC address
 * @pdev: data physical device handle
 * @peer_addr: peer MAC address
 *
 * NOTE(review): unlike ol_txrx_peer_get_ref_by_addr() this does not
 * appear to take a reference on the returned peer -- confirm before
 * relying on the pointer outside a protected context.
 *
 * Return: opaque peer pointer, NULL if not found
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *pdev,
				uint8_t *peer_addr);

/**
 * ol_txrx_peer_state_update() - specify the peer's authentication state
 * @soc_hdl: datapath soc handle
 * @peer_mac: mac address of which peer has changed its state
 * @state: the new state of the peer
 *
 * Specify the peer's authentication state (none, connected, authenticated)
 * to allow the data SW to determine whether to filter out invalid data
 * frames.  (In the "connected" state, where security is enabled, but
 * authentication has not completed, tx and rx data frames other than
 * EAPOL or WAPI should be discarded.)
 * This function is only relevant for systems in which the tx and rx
 * filtering are done in the host rather than in the target.
 *
 * Return: QDF Status
 */
QDF_STATUS ol_txrx_peer_state_update(struct cdp_soc_t *soc_hdl,
				     uint8_t *peer_mac,
				     enum ol_txrx_peer_state state);

/**
 * htt_pkt_log_init() - initialize packet logging for a pdev
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device
 * @scn: opaque target context -- NOTE(review): confirm expected type
 */
void htt_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn);

/*
 * peer_unmap_timer_handler() - timer callback fired when an expected
 * peer unmap event has not arrived in time; @data is the timer context
 * (presumably the pdev -- confirm against the timer-arming site).
 */
void peer_unmap_timer_handler(void *data);
254*5113495bSYour Name 
255*5113495bSYour Name #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
256*5113495bSYour Name /**
257*5113495bSYour Name  * ol_txrx_register_tx_flow_control() - register tx flow control callback
258*5113495bSYour Name  * @soc_hdl: soc handle
259*5113495bSYour Name  * @vdev_id: vdev_id
260*5113495bSYour Name  * @flowControl: flow control callback
261*5113495bSYour Name  * @osif_fc_ctx: callback context
262*5113495bSYour Name  * @flow_control_is_pause: is vdev paused by flow control
263*5113495bSYour Name  *
264*5113495bSYour Name  * Return: 0 for success or error code
265*5113495bSYour Name  */
266*5113495bSYour Name int ol_txrx_register_tx_flow_control(struct cdp_soc_t *soc_hdl,
267*5113495bSYour Name 				     uint8_t vdev_id,
268*5113495bSYour Name 				     ol_txrx_tx_flow_control_fp flow_control,
269*5113495bSYour Name 				     void *osif_fc_ctx,
270*5113495bSYour Name 				     ol_txrx_tx_flow_control_is_pause_fp
271*5113495bSYour Name 				     flow_control_is_pause);
272*5113495bSYour Name 
273*5113495bSYour Name /**
274*5113495bSYour Name  * ol_txrx_de_register_tx_flow_control_cb() - deregister tx flow control
275*5113495bSYour Name  *                                            callback
276*5113495bSYour Name  * @soc_hdl: soc handle
277*5113495bSYour Name  * @vdev_id: vdev_id
278*5113495bSYour Name  *
279*5113495bSYour Name  * Return: 0 for success or error code
280*5113495bSYour Name  */
281*5113495bSYour Name int ol_txrx_deregister_tx_flow_control_cb(struct cdp_soc_t *soc_hdl,
282*5113495bSYour Name 					  uint8_t vdev_id);
283*5113495bSYour Name 
/**
 * ol_txrx_get_tx_resource() - check tx resource availability for a peer
 *  and program flow-control watermarks
 * @soc_hdl: soc handle
 * @pdev_id: id of the physical device
 * @peer_addr: peer mac address
 * @low_watermark: low watermark to apply
 * @high_watermark_offset: offset above the low watermark for resume
 *
 * NOTE(review): confirm the exact return semantics (true == resources
 * available?) against the definition in ol_txrx.c.
 *
 * Return: tx resource availability indication
 */
bool ol_txrx_get_tx_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			     struct qdf_mac_addr peer_addr,
			     unsigned int low_watermark,
			     unsigned int high_watermark_offset);
288*5113495bSYour Name 
289*5113495bSYour Name /**
290*5113495bSYour Name  * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
291*5113495bSYour Name  * @soc_hdl: soc handle
292*5113495bSYour Name  * @vdev_id: vdev id
293*5113495bSYour Name  * @pause_q_depth: pause queue depth
294*5113495bSYour Name  *
295*5113495bSYour Name  * Return: 0 for success or error code
296*5113495bSYour Name  */
297*5113495bSYour Name int ol_txrx_ll_set_tx_pause_q_depth(struct cdp_soc_t *soc_hdl,
298*5113495bSYour Name 				    uint8_t vdev_id, int pause_q_depth);
299*5113495bSYour Name #endif
300*5113495bSYour Name 
/* Initialize pdev tx state at attach time */
void ol_tx_init_pdev(ol_txrx_pdev_handle pdev);

#ifdef CONFIG_HL_SUPPORT
/* HL (high-latency) mode keeps per-vdev and per-peer tx queues in the
 * host; these init/free those queues.
 */
void ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev);
void ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev);
void ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer);
void ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
				struct ol_txrx_peer_t *peer);
#else
/* Stubs: no host-side tx queues without CONFIG_HL_SUPPORT */
static inline void
ol_txrx_vdev_txqs_init(struct ol_txrx_vdev_t *vdev) {}

static inline void
ol_txrx_vdev_tx_queue_free(struct ol_txrx_vdev_t *vdev) {}

static inline void
ol_txrx_peer_txqs_init(struct ol_txrx_pdev_t *pdev,
		       struct ol_txrx_peer_t *peer) {}

static inline void
ol_txrx_peer_tx_queue_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer) {}
#endif
325*5113495bSYour Name 
#if defined(CONFIG_HL_SUPPORT) && defined(DEBUG_HL_LOGGING)
/* Debug logging for HL tx queues and tx credit group stats */
void ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev);
void ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev);
void ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev);
void ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev);
#else
/* Stubs: HL txq debug logging not compiled in */
static inline void
ol_txrx_pdev_txq_log_init(struct ol_txrx_pdev_t *pdev) {}

static inline void
ol_txrx_pdev_txq_log_destroy(struct ol_txrx_pdev_t *pdev) {}

static inline void
ol_txrx_pdev_grp_stats_init(struct ol_txrx_pdev_t *pdev) {}

static inline void
ol_txrx_pdev_grp_stat_destroy(struct ol_txrx_pdev_t *pdev) {}
#endif
344*5113495bSYour Name 
#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
/**
 * ol_txrx_copy_mac_addr_raw() - copy raw mac addr
 * @soc: datapath soc handle
 * @vdev_id: the data virtual device id
 * @bss_addr: bss address
 *
 * Return: None
 */
void ol_txrx_copy_mac_addr_raw(struct cdp_soc_t *soc, uint8_t vdev_id,
			       uint8_t *bss_addr);

/**
 * ol_txrx_add_last_real_peer() - add last peer
 * @soc: datapath soc handle
 * @pdev_id: the data physical device id
 * @vdev_id: virtual device id
 *
 * Return: None
 */
void ol_txrx_add_last_real_peer(struct cdp_soc_t *soc, uint8_t pdev_id,
				uint8_t vdev_id);

/**
 * is_vdev_restore_last_peer() - check for vdev last peer
 * @soc: datapath soc handle
 * @vdev_id: vdev id
 * @peer_mac: peer mac address
 *
 * Return: true if last peer is not null
 */
bool is_vdev_restore_last_peer(struct cdp_soc_t *soc, uint8_t vdev_id,
			       uint8_t *peer_mac);

/**
 * ol_txrx_update_last_real_peer() - check for vdev last peer
 * @soc: datapath soc handle
 * @pdev_id: the data physical device id
 * @vdev_id: vdev_id
 * @restore_last_peer: restore last peer flag
 *
 * Return: None
 */
void ol_txrx_update_last_real_peer(struct cdp_soc_t *soc, uint8_t pdev_id,
				   uint8_t vdev_id,
				   bool restore_last_peer);

/**
 * ol_txrx_set_peer_as_tdls_peer() - mark peer as tdls peer
 * @soc: pointer to SOC handle
 * @vdev_id: virtual interface id
 * @peer_mac: peer mac address
 * @val: false/true
 *
 * Return: None
 */
void ol_txrx_set_peer_as_tdls_peer(struct cdp_soc_t *soc, uint8_t vdev_id,
				   uint8_t *peer_mac, bool val);

/**
 * ol_txrx_set_tdls_offchan_enabled() - set tdls offchan enabled
 * @soc: pointer to SOC handle
 * @vdev_id: virtual interface id
 * @peer_mac: peer mac address
 * @val: false/true
 *
 * Return: None
 */
void ol_txrx_set_tdls_offchan_enabled(struct cdp_soc_t *soc, uint8_t vdev_id,
				      uint8_t *peer_mac, bool val);
#endif
416*5113495bSYour Name 
#if defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG)
/* TSO (tx segmentation offload) debug statistics */
void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev);
void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev);
void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev);
void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev);
#else
/* Stubs: TSO stats not compiled in; display only logs an error */
static inline
void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
	ol_txrx_err("TSO is not supported");
}

static inline
void ol_txrx_tso_stats_init(ol_txrx_pdev_handle pdev) {}

static inline
void ol_txrx_tso_stats_deinit(ol_txrx_pdev_handle pdev) {}

static inline
void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev) {}
#endif
438*5113495bSYour Name 
/**
 * ol_txrx_mgmt_tx_desc_alloc() - allocate a tx descriptor for a
 *  management frame
 * @pdev: physical device on which the frame is sent
 * @vdev: virtual device on which the frame is sent
 * @tx_mgmt_frm: management frame buffer
 * @tx_msdu_info: tx meta data for the frame
 *
 * Return: tx descriptor, NULL if allocation failed
 */
struct ol_tx_desc_t *
ol_txrx_mgmt_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_vdev_t *vdev,
			   qdf_nbuf_t tx_mgmt_frm,
			   struct ol_txrx_msdu_info_t *tx_msdu_info);

/**
 * ol_txrx_mgmt_send_frame() - send a management frame
 * @vdev: virtual device transmitting the frame
 * @tx_desc: tx descriptor previously allocated for the frame
 * @tx_mgmt_frm: management frame buffer
 * @tx_msdu_info: tx meta data for the frame
 * @chanfreq: channel frequency to transmit on
 *
 * Return: 0 on success -- NOTE(review): confirm error convention
 *         against the definition
 */
int ol_txrx_mgmt_send_frame(struct ol_txrx_vdev_t *vdev,
			    struct ol_tx_desc_t *tx_desc,
			    qdf_nbuf_t tx_mgmt_frm,
			    struct ol_txrx_msdu_info_t *tx_msdu_info,
			    uint16_t chanfreq);
450*5113495bSYour Name 
#ifdef CONFIG_HL_SUPPORT
/**
 * ol_tx_get_desc_global_pool_size() - get the global tx descriptor
 *  pool size
 * @pdev: physical device instance
 *
 * HL: size derived from the control pdev configuration.
 *
 * Return: global tx descriptor pool size
 */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
}
#else
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* LL + flow control v2: size fixed at attach (pdev->num_msdu_desc) */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return pdev->num_msdu_desc;
}
#else
/* Legacy LL: size follows the target's tx credit count */
static inline
uint32_t ol_tx_get_desc_global_pool_size(struct ol_txrx_pdev_t *pdev)
{
	return ol_cfg_target_tx_credit(pdev->ctrl_pdev);
}
#endif
#endif
472*5113495bSYour Name 
/*
 * The helpers below convert between the opaque CDP handle types and
 * the concrete ol_txrx types.  They are pure pointer casts with no
 * validation; callers are responsible for passing the correct object.
 */

/**
 * cdp_soc_t_to_ol_txrx_soc_t() - typecast cdp_soc_t to ol_txrx_soc_t
 * @soc: OL soc handle
 *
 * Return: struct ol_txrx_soc_t pointer
 */
static inline
struct ol_txrx_soc_t *cdp_soc_t_to_ol_txrx_soc_t(ol_txrx_soc_handle soc)
{
	return (struct ol_txrx_soc_t *)soc;
}

/**
 * ol_txrx_soc_t_to_cdp_soc_t() - typecast ol_txrx_soc_t to cdp_soc
 * @soc: Opaque soc handle
 *
 * Return: struct cdp_soc_t pointer
 */
static inline
ol_txrx_soc_handle ol_txrx_soc_t_to_cdp_soc_t(struct ol_txrx_soc_t *soc)
{
	return (struct cdp_soc_t *)soc;
}

/**
 * cdp_pdev_to_ol_txrx_pdev_t() - typecast cdp_pdev to ol_txrx_pdev_t
 * @pdev: OL pdev handle
 *
 * Return: struct ol_txrx_pdev_t pointer
 */
static inline
struct ol_txrx_pdev_t *cdp_pdev_to_ol_txrx_pdev_t(struct cdp_pdev *pdev)
{
	return (struct ol_txrx_pdev_t *)pdev;
}

/**
 * ol_txrx_pdev_t_to_cdp_pdev() - typecast ol_txrx_pdev_t to cdp_pdev
 * @pdev: Opaque pdev handle
 *
 * Return: struct cdp_pdev pointer
 */
static inline
struct cdp_pdev *ol_txrx_pdev_t_to_cdp_pdev(struct ol_txrx_pdev_t *pdev)
{
	return (struct cdp_pdev *)pdev;
}

/**
 * cdp_vdev_to_ol_txrx_vdev_t() - typecast cdp_vdev to ol_txrx_vdev_t
 * @vdev: OL vdev handle
 *
 * Return: struct ol_txrx_vdev_t pointer
 */
static inline
struct ol_txrx_vdev_t *cdp_vdev_to_ol_txrx_vdev_t(struct cdp_vdev *vdev)
{
	return (struct ol_txrx_vdev_t *)vdev;
}

/**
 * ol_txrx_vdev_t_to_cdp_vdev() - typecast ol_txrx_vdev_t to cdp_vdev
 * @vdev: Opaque vdev handle
 *
 * Return: struct cdp_vdev pointer
 */
static inline
struct cdp_vdev *ol_txrx_vdev_t_to_cdp_vdev(struct ol_txrx_vdev_t *vdev)
{
	return (struct cdp_vdev *)vdev;
}
544*5113495bSYour Name 
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/* Set the global tx descriptor pool size used at pdev attach */
void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc);

/* Get the total number of free tx descriptors */
uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev);

/**
 * ol_txrx_fwd_desc_thresh_check() - check to forward packet to tx path
 * @txrx_vdev: which virtual device the frames were addressed to
 *
 * This API is to check whether enough descriptors are available or not
 * to forward packet to tx path. If not enough descriptors left,
 * start dropping tx-path packets.
 * Do not pause netif queues as still a pool of descriptors is reserved
 * for high-priority traffic such as EAPOL/ARP etc.
 * In case of intra-bss forwarding, it could be possible that tx-path can
 * consume all the tx descriptors and pause netif queues. Due to this,
 * there would be some left for stack triggered packets such as ARP packets
 * which could lead to disconnection of device. To avoid this, reserved
 * a pool of descriptors for high-priority packets, i.e., reduce the
 * threshold of drop in the intra-bss forwarding path.
 *
 * Return: true ; forward the packet, i.e., below threshold
 *         false; not enough descriptors, drop the packet
 */
bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *txrx_vdev);
568*5113495bSYour Name 
569*5113495bSYour Name /**
570*5113495bSYour Name  * ol_tx_desc_thresh_reached() - is tx desc threshold reached
571*5113495bSYour Name  * @soc_hdl: Datapath soc handle
572*5113495bSYour Name  * @vdev_id: id of vdev
573*5113495bSYour Name  *
574*5113495bSYour Name  * Return: true if tx desc available reached threshold or false otherwise
575*5113495bSYour Name  */
ol_tx_desc_thresh_reached(struct cdp_soc_t * soc_hdl,uint8_t vdev_id)576*5113495bSYour Name static inline bool ol_tx_desc_thresh_reached(struct cdp_soc_t *soc_hdl,
577*5113495bSYour Name 					     uint8_t vdev_id)
578*5113495bSYour Name {
579*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
580*5113495bSYour Name 
581*5113495bSYour Name 	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
582*5113495bSYour Name 	if (!vdev) {
583*5113495bSYour Name 		dp_err("vdev is NULL");
584*5113495bSYour Name 		return false;
585*5113495bSYour Name 	}
586*5113495bSYour Name 
587*5113495bSYour Name 	return !(ol_txrx_fwd_desc_thresh_check(vdev));
588*5113495bSYour Name }
589*5113495bSYour Name 
590*5113495bSYour Name #else
591*5113495bSYour Name /**
592*5113495bSYour Name  * ol_tx_get_total_free_desc() - get total free descriptors
593*5113495bSYour Name  * @pdev: pdev handle
594*5113495bSYour Name  *
595*5113495bSYour Name  * Return: total free descriptors
596*5113495bSYour Name  */
597*5113495bSYour Name static inline
ol_tx_get_total_free_desc(struct ol_txrx_pdev_t * pdev)598*5113495bSYour Name uint32_t ol_tx_get_total_free_desc(struct ol_txrx_pdev_t *pdev)
599*5113495bSYour Name {
600*5113495bSYour Name 	return pdev->tx_desc.num_free;
601*5113495bSYour Name }
602*5113495bSYour Name 
/**
 * ol_txrx_fwd_desc_thresh_check() - stub when the reserved descriptor
 *	pool for high-priority packets is not compiled in
 * @txrx_vdev: vdev handle (unused)
 *
 * Return: always true, i.e. never throttle intra-bss forwarding
 */
static inline
bool ol_txrx_fwd_desc_thresh_check(struct ol_txrx_vdev_t *txrx_vdev)
{
	return true;
}
608*5113495bSYour Name 
609*5113495bSYour Name #endif
610*5113495bSYour Name 
611*5113495bSYour Name #if defined(FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL) && \
612*5113495bSYour Name 	defined(FEATURE_HL_DBS_GROUP_CREDIT_SHARING)
613*5113495bSYour Name static inline void
ol_txrx_init_txq_group_limit_lend(struct ol_txrx_pdev_t * pdev)614*5113495bSYour Name ol_txrx_init_txq_group_limit_lend(struct ol_txrx_pdev_t *pdev)
615*5113495bSYour Name {
616*5113495bSYour Name 	BUILD_BUG_ON(OL_TX_MAX_GROUPS_PER_QUEUE > 1);
617*5113495bSYour Name 	BUILD_BUG_ON(OL_TX_MAX_TXQ_GROUPS > 2);
618*5113495bSYour Name 	pdev->limit_lend = 0;
619*5113495bSYour Name 	pdev->min_reserve = 0;
620*5113495bSYour Name }
621*5113495bSYour Name #else
/*
 * Stub when FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL and
 * FEATURE_HL_DBS_GROUP_CREDIT_SHARING are not both enabled:
 * there is no limit/lend state to initialize.
 */
static inline void
ol_txrx_init_txq_group_limit_lend(struct ol_txrx_pdev_t *pdev)
{}
625*5113495bSYour Name #endif
626*5113495bSYour Name 
/**
 * ol_txrx_fw_stats_desc_pool_init() - set up the fw stats request
 *	descriptor pool
 * @pdev: physical device instance
 * @pool_size: number of descriptors in the pool
 *
 * Return: status code — presumably 0 on success; confirm against definition
 */
int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
				    uint8_t pool_size);

/**
 * ol_txrx_fw_stats_desc_pool_deinit() - tear down the fw stats request
 *	descriptor pool
 * @pdev: physical device instance
 *
 * Return: none
 */
void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev);

/**
 * ol_txrx_fw_stats_desc_alloc() - get a fw stats request descriptor
 * @pdev: physical device instance
 *
 * Return: descriptor pointer — presumably NULL when the pool is
 *	exhausted; confirm against definition
 */
struct ol_txrx_fw_stats_desc_t
	*ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev);

/**
 * ol_txrx_fw_stats_desc_get_req() - look up the stats request bound to
 *	a descriptor id
 * @pdev: physical device instance
 * @desc_id: descriptor identifier
 *
 * Return: associated stats request — presumably NULL when @desc_id is
 *	invalid; confirm against definition
 */
struct ol_txrx_stats_req_internal
	*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
				       uint8_t desc_id);
635*5113495bSYour Name 
636*5113495bSYour Name #ifdef QCA_HL_NETDEV_FLOW_CONTROL
637*5113495bSYour Name /**
638*5113495bSYour Name  * ol_txrx_register_hl_flow_control() -register hl netdev flow control callback
639*5113495bSYour Name  * @soc_hdl: soc handle
640*5113495bSYour Name  * @pdev_id: datapath pdev identifier
 * @flowcontrol: flow control callback
642*5113495bSYour Name  *
643*5113495bSYour Name  * Return: 0 for success or error code
644*5113495bSYour Name  */
645*5113495bSYour Name int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc_hdl,
646*5113495bSYour Name 				     uint8_t pdev_id,
647*5113495bSYour Name 				     tx_pause_callback flowcontrol);
648*5113495bSYour Name 
649*5113495bSYour Name /**
650*5113495bSYour Name  * ol_txrx_set_vdev_os_queue_status() - Set OS queue status for a vdev
651*5113495bSYour Name  * @soc_hdl: soc handle
652*5113495bSYour Name  * @vdev_id: vdev id for the vdev under consideration.
653*5113495bSYour Name  * @action: action to be done on queue for vdev
654*5113495bSYour Name  *
655*5113495bSYour Name  * Return: 0 on success, -EINVAL on failure
656*5113495bSYour Name  */
657*5113495bSYour Name int ol_txrx_set_vdev_os_queue_status(struct cdp_soc_t *soc_hdl, u8 vdev_id,
658*5113495bSYour Name 				     enum netif_action_type action);
659*5113495bSYour Name 
660*5113495bSYour Name /**
661*5113495bSYour Name  * ol_txrx_set_vdev_tx_desc_limit() - Set TX descriptor limits for a vdev
662*5113495bSYour Name  * @soc_hdl: soc handle
663*5113495bSYour Name  * @vdev_id: vdev id for the vdev under consideration.
664*5113495bSYour Name  * @chan_freq: channel frequency on which the vdev has been started.
665*5113495bSYour Name  *
666*5113495bSYour Name  * Return: 0 on success, -EINVAL on failure
667*5113495bSYour Name  */
668*5113495bSYour Name int ol_txrx_set_vdev_tx_desc_limit(struct cdp_soc_t *soc_hdl, u8 vdev_id,
669*5113495bSYour Name 				   u32 chan_freq);
670*5113495bSYour Name #endif
671*5113495bSYour Name 
672*5113495bSYour Name /**
673*5113495bSYour Name  * ol_txrx_get_new_htt_msg_format() - check htt h2t msg feature
 * @pdev: datapath device instance
 *
 * Check if h2t message length includes htc header length
 *
 * Return: true if the new htt h2t msg feature is enabled, false otherwise
679*5113495bSYour Name  */
680*5113495bSYour Name bool ol_txrx_get_new_htt_msg_format(struct ol_txrx_pdev_t *pdev);
681*5113495bSYour Name 
682*5113495bSYour Name /**
683*5113495bSYour Name  * ol_txrx_set_new_htt_msg_format() - set htt h2t msg feature
 * @val: enable or disable new htt h2t msg feature
 *
 * Set if h2t message length includes htc header length
 *
 * Return: none
689*5113495bSYour Name  */
690*5113495bSYour Name void ol_txrx_set_new_htt_msg_format(uint8_t val);
691*5113495bSYour Name 
692*5113495bSYour Name /**
693*5113495bSYour Name  * ol_txrx_set_peer_unmap_conf_support() - set peer unmap conf feature
 * @val: enable or disable peer unmap conf feature
 *
 * Set if peer unmap conf feature is supported by both FW and in INI
 *
 * Return: none
699*5113495bSYour Name  */
700*5113495bSYour Name void ol_txrx_set_peer_unmap_conf_support(bool val);
701*5113495bSYour Name 
702*5113495bSYour Name /**
703*5113495bSYour Name  * ol_txrx_get_peer_unmap_conf_support() - check peer unmap conf feature
704*5113495bSYour Name  *
705*5113495bSYour Name  * Check if peer unmap conf feature is enabled
706*5113495bSYour Name  *
 * Return: true if peer unmap conf feature is enabled, else false
708*5113495bSYour Name  */
709*5113495bSYour Name bool ol_txrx_get_peer_unmap_conf_support(void);
710*5113495bSYour Name 
711*5113495bSYour Name /**
712*5113495bSYour Name  * ol_txrx_get_tx_compl_tsf64() - check tx compl tsf64 feature
713*5113495bSYour Name  *
714*5113495bSYour Name  * Check if tx compl tsf64 feature is enabled
715*5113495bSYour Name  *
 * Return: true if tx compl tsf64 feature is enabled, else false
717*5113495bSYour Name  */
718*5113495bSYour Name bool ol_txrx_get_tx_compl_tsf64(void);
719*5113495bSYour Name 
720*5113495bSYour Name /**
721*5113495bSYour Name  * ol_txrx_set_tx_compl_tsf64() - set tx compl tsf64 feature
 * @val: enable or disable tx compl tsf64 feature
 *
 * Set if tx compl tsf64 feature is supported by FW
 *
 * Return: none
727*5113495bSYour Name  */
728*5113495bSYour Name void ol_txrx_set_tx_compl_tsf64(bool val);
729*5113495bSYour Name 
730*5113495bSYour Name #ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
731*5113495bSYour Name 
732*5113495bSYour Name /**
733*5113495bSYour Name  * ol_txrx_vdev_init_tcp_del_ack() - initialize tcp delayed ack structure
734*5113495bSYour Name  * @vdev: vdev handle
735*5113495bSYour Name  *
736*5113495bSYour Name  * Return: none
737*5113495bSYour Name  */
738*5113495bSYour Name void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev);
739*5113495bSYour Name 
740*5113495bSYour Name /**
741*5113495bSYour Name  * ol_txrx_vdev_deinit_tcp_del_ack() - deinitialize tcp delayed ack structure
742*5113495bSYour Name  * @vdev: vdev handle
743*5113495bSYour Name  *
744*5113495bSYour Name  * Return: none
745*5113495bSYour Name  */
746*5113495bSYour Name void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev);
747*5113495bSYour Name 
748*5113495bSYour Name /**
749*5113495bSYour Name  * ol_txrx_vdev_free_tcp_node() - add tcp node in free list
750*5113495bSYour Name  * @vdev: vdev handle
751*5113495bSYour Name  * @node: tcp stream node
752*5113495bSYour Name  *
753*5113495bSYour Name  * Return: none
754*5113495bSYour Name  */
755*5113495bSYour Name void ol_txrx_vdev_free_tcp_node(struct ol_txrx_vdev_t *vdev,
756*5113495bSYour Name 				struct tcp_stream_node *node);
757*5113495bSYour Name 
758*5113495bSYour Name /**
759*5113495bSYour Name  * ol_txrx_vdev_alloc_tcp_node() - allocate tcp node
760*5113495bSYour Name  * @vdev: vdev handle
761*5113495bSYour Name  *
762*5113495bSYour Name  * Return: tcp stream node
763*5113495bSYour Name  */
764*5113495bSYour Name struct tcp_stream_node *
765*5113495bSYour Name ol_txrx_vdev_alloc_tcp_node(struct ol_txrx_vdev_t *vdev);
766*5113495bSYour Name 
767*5113495bSYour Name /**
768*5113495bSYour Name  * ol_tx_pdev_reset_driver_del_ack() - reset driver delayed ack enabled flag
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the data physical device
770*5113495bSYour Name  *
771*5113495bSYour Name  * Return: none
772*5113495bSYour Name  */
773*5113495bSYour Name void
774*5113495bSYour Name ol_tx_pdev_reset_driver_del_ack(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
775*5113495bSYour Name 
776*5113495bSYour Name /**
777*5113495bSYour Name  * ol_tx_vdev_set_driver_del_ack_enable() - set driver delayed ack enabled flag
778*5113495bSYour Name  * @soc_hdl: datapath soc handle
779*5113495bSYour Name  * @vdev_id: vdev id
780*5113495bSYour Name  * @rx_packets: number of rx packets
781*5113495bSYour Name  * @time_in_ms: time in ms
782*5113495bSYour Name  * @high_th: high threshold
783*5113495bSYour Name  * @low_th: low threshold
784*5113495bSYour Name  *
785*5113495bSYour Name  * Return: none
786*5113495bSYour Name  */
787*5113495bSYour Name void
788*5113495bSYour Name ol_tx_vdev_set_driver_del_ack_enable(struct cdp_soc_t *soc_hdl,
789*5113495bSYour Name 				     uint8_t vdev_id,
790*5113495bSYour Name 				     unsigned long rx_packets,
791*5113495bSYour Name 				     uint32_t time_in_ms,
792*5113495bSYour Name 				     uint32_t high_th,
793*5113495bSYour Name 				     uint32_t low_th);
794*5113495bSYour Name 
795*5113495bSYour Name /**
796*5113495bSYour Name  * ol_tx_hl_send_all_tcp_ack() - send all queued tcp ack packets
797*5113495bSYour Name  * @vdev: vdev handle
798*5113495bSYour Name  *
799*5113495bSYour Name  * Return: none
800*5113495bSYour Name  */
801*5113495bSYour Name void ol_tx_hl_send_all_tcp_ack(struct ol_txrx_vdev_t *vdev);
802*5113495bSYour Name 
803*5113495bSYour Name /**
804*5113495bSYour Name  * tcp_del_ack_tasklet() - tasklet function to send ack packets
805*5113495bSYour Name  * @data: vdev handle
806*5113495bSYour Name  *
807*5113495bSYour Name  * Return: none
808*5113495bSYour Name  */
809*5113495bSYour Name void tcp_del_ack_tasklet(void *data);
810*5113495bSYour Name 
811*5113495bSYour Name /**
812*5113495bSYour Name  * ol_tx_get_stream_id() - get stream_id from packet info
813*5113495bSYour Name  * @info: packet info
814*5113495bSYour Name  *
815*5113495bSYour Name  * Return: stream_id
816*5113495bSYour Name  */
817*5113495bSYour Name uint16_t ol_tx_get_stream_id(struct packet_info *info);
818*5113495bSYour Name 
819*5113495bSYour Name /**
820*5113495bSYour Name  * ol_tx_get_packet_info() - update packet info for passed msdu
821*5113495bSYour Name  * @msdu: packet
822*5113495bSYour Name  * @info: packet info
823*5113495bSYour Name  *
824*5113495bSYour Name  * Return: none
825*5113495bSYour Name  */
826*5113495bSYour Name void ol_tx_get_packet_info(qdf_nbuf_t msdu, struct packet_info *info);
827*5113495bSYour Name 
828*5113495bSYour Name /**
829*5113495bSYour Name  * ol_tx_hl_find_and_send_tcp_stream() - find and send tcp stream for passed
830*5113495bSYour Name  *                                       stream info
831*5113495bSYour Name  * @vdev: vdev handle
832*5113495bSYour Name  * @info: packet info
833*5113495bSYour Name  *
834*5113495bSYour Name  * Return: none
835*5113495bSYour Name  */
836*5113495bSYour Name void ol_tx_hl_find_and_send_tcp_stream(struct ol_txrx_vdev_t *vdev,
837*5113495bSYour Name 				       struct packet_info *info);
838*5113495bSYour Name 
839*5113495bSYour Name /**
840*5113495bSYour Name  * ol_tx_hl_find_and_replace_tcp_ack() - find and replace tcp ack packet for
841*5113495bSYour Name  *                                       passed packet info
842*5113495bSYour Name  * @vdev: vdev handle
843*5113495bSYour Name  * @msdu: packet
844*5113495bSYour Name  * @info: packet info
845*5113495bSYour Name  *
846*5113495bSYour Name  * Return: none
847*5113495bSYour Name  */
848*5113495bSYour Name void ol_tx_hl_find_and_replace_tcp_ack(struct ol_txrx_vdev_t *vdev,
849*5113495bSYour Name 				       qdf_nbuf_t msdu,
850*5113495bSYour Name 				       struct packet_info *info);
851*5113495bSYour Name 
852*5113495bSYour Name /**
853*5113495bSYour Name  * ol_tx_hl_vdev_tcp_del_ack_timer() - delayed ack timer function
854*5113495bSYour Name  * @timer: timer handle
855*5113495bSYour Name  *
856*5113495bSYour Name  * Return: enum
857*5113495bSYour Name  */
858*5113495bSYour Name enum qdf_hrtimer_restart_status
859*5113495bSYour Name ol_tx_hl_vdev_tcp_del_ack_timer(qdf_hrtimer_data_t *timer);
860*5113495bSYour Name 
861*5113495bSYour Name /**
862*5113495bSYour Name  * ol_tx_hl_del_ack_queue_flush_all() - drop all queued packets
863*5113495bSYour Name  * @vdev: vdev handle
864*5113495bSYour Name  *
865*5113495bSYour Name  * Return: none
866*5113495bSYour Name  */
867*5113495bSYour Name void ol_tx_hl_del_ack_queue_flush_all(struct ol_txrx_vdev_t *vdev);
868*5113495bSYour Name 
869*5113495bSYour Name #else
870*5113495bSYour Name 
/**
 * ol_txrx_vdev_init_tcp_del_ack() - stub when TCP delayed-ack offload
 *	(QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK) is compiled out
 * @vdev: vdev handle (unused)
 *
 * Return: none
 */
static inline
void ol_txrx_vdev_init_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
{
}
875*5113495bSYour Name 
/**
 * ol_txrx_vdev_deinit_tcp_del_ack() - stub when TCP delayed-ack offload
 *	is compiled out; nothing to tear down
 * @vdev: vdev handle (unused)
 *
 * Return: none
 */
static inline
void ol_txrx_vdev_deinit_tcp_del_ack(struct ol_txrx_vdev_t *vdev)
{
}
880*5113495bSYour Name 
/**
 * ol_tx_pdev_reset_driver_del_ack() - stub when TCP delayed-ack offload
 *	is compiled out
 * @soc_hdl: datapath soc handle (unused)
 * @pdev_id: datapath pdev identifier (unused)
 *
 * Return: none
 */
static inline
void ol_tx_pdev_reset_driver_del_ack(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
}
885*5113495bSYour Name 
/**
 * ol_tx_vdev_set_driver_del_ack_enable() - stub when TCP delayed-ack
 *	offload is compiled out; all arguments are ignored
 * @soc_hdl: datapath soc handle (unused)
 * @vdev_id: vdev id (unused)
 * @rx_packets: number of rx packets (unused)
 * @time_in_ms: time in ms (unused)
 * @high_th: high threshold (unused)
 * @low_th: low threshold (unused)
 *
 * Return: none
 */
static inline
void ol_tx_vdev_set_driver_del_ack_enable(struct cdp_soc_t *soc_hdl,
					  uint8_t vdev_id,
					  unsigned long rx_packets,
					  uint32_t time_in_ms,
					  uint32_t high_th,
					  uint32_t low_th)
{
}
895*5113495bSYour Name 
896*5113495bSYour Name #endif
897*5113495bSYour Name 
898*5113495bSYour Name #ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
/**
 * ol_tx_vdev_set_bundle_require() - set tx bundling parameters for a vdev
 * @vdev_id: id of the vdev
 * @tx_bytes: tx byte count — presumably used to evaluate the thresholds;
 *	confirm against definition
 * @time_in_ms: time window in ms
 * @high_th: high threshold
 * @low_th: low threshold
 *
 * Return: none
 */
void ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
				   uint32_t time_in_ms, uint32_t high_th,
				   uint32_t low_th);

/**
 * ol_tx_pdev_reset_bundle_require() - reset the bundle-require state
 *	for the given pdev — confirm scope against definition
 * @soc_hdl: datapath soc handle
 * @pdev_id: id of the physical device
 *
 * Return: none
 */
void ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
904*5113495bSYour Name 
905*5113495bSYour Name #else
906*5113495bSYour Name 
/**
 * ol_tx_vdev_set_bundle_require() - stub when HL tx bundling
 *	(WLAN_SUPPORT_TXRX_HL_BUNDLE) is compiled out; all arguments
 *	are ignored
 * @vdev_id: vdev id (unused)
 * @tx_bytes: tx byte count (unused)
 * @time_in_ms: time in ms (unused)
 * @high_th: high threshold (unused)
 * @low_th: low threshold (unused)
 *
 * Return: none
 */
static inline
void ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
				   uint32_t time_in_ms, uint32_t high_th,
				   uint32_t low_th)
{
}
913*5113495bSYour Name 
/**
 * ol_tx_pdev_reset_bundle_require() - stub when HL tx bundling is
 *	compiled out
 * @soc_hdl: datapath soc handle (unused)
 * @pdev_id: datapath pdev id (unused)
 *
 * Return: none
 */
static inline
void ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
}
918*5113495bSYour Name #endif
919*5113495bSYour Name 
920*5113495bSYour Name #endif /* _OL_TXRX__H_ */
921