xref: /wlan-driver/qcacld-3.0/core/dp/txrx/ol_txrx.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
/*
 * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc, free */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
#include <qdf_lock.h>           /* qdf_spinlock */
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <qdf_debugfs.h>

/* header files for utilities */
#include "queue.h"              /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_is_high_latency */
#include <ol_if_athvar.h>

/* header files for HTT API */
#include <ol_htt_api.h>
#include <ol_htt_tx_api.h>

/* header files for our own APIs */
#include <ol_txrx_api.h>
#include <ol_txrx_dbg.h>
#include <cdp_txrx_ocb.h>
#include <ol_txrx_ctrl_api.h>
#include <cdp_txrx_stats.h>
#include <ol_txrx_osif_api.h>
/* header files for our internal definitions */
#include <ol_txrx_internal.h>   /* TXRX_ASSERT, etc. */
#include <wdi_event.h>          /* WDI events */
#include <ol_tx.h>              /* ol_tx_ll */
#include <ol_rx.h>              /* ol_rx_deliver */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_rx_pn.h>           /* ol_rx_pn_check, etc. */
#include <ol_rx_fwd.h>          /* ol_rx_fwd_check, etc. */
#include <ol_rx_reorder_timeout.h>      /* OL_RX_REORDER_TIMEOUT_INIT, etc. */
#include <ol_rx_reorder.h>
#include <ol_tx_send.h>         /* ol_tx_discard_target_frms */
#include <ol_tx_desc.h>         /* ol_tx_desc_frame_free */
#include <ol_tx_queue.h>
#include <ol_tx_sched.h>        /* ol_tx_sched_attach, etc. */
#include <ol_txrx.h>
#include <ol_txrx_types.h>
#include <cdp_txrx_flow_ctrl_legacy.h>
#include <cdp_txrx_cmn_reg.h>
#include <cdp_txrx_bus.h>
#include <cdp_txrx_ipa.h>
#include <cdp_txrx_pmf.h>
#include "wma.h"
#include "hif.h"
#include "hif_main.h"
#include <cdp_txrx_peer_ops.h>
#ifndef REMOVE_PKT_LOG
#include "pktlog_ac.h"
#endif
#include <wlan_policy_mgr_api.h>
#include "epping_main.h"
#include <a_types.h>
#include <cdp_txrx_handle.h>
#include "wlan_qct_sys.h"

#include <htt_internal.h>
#include <ol_txrx_ipa.h>
#include "wlan_roam_debug.h"
#include "cfg_ucfg_api.h"
#ifdef DP_SUPPORT_RECOVERY_NOTIFY
#include <qdf_notifier.h>
#include <qdf_hang_event_notifier.h>
#endif

#define DPT_DEBUGFS_PERMS	(QDF_FILE_USR_READ |	\
				QDF_FILE_USR_WRITE |	\
				QDF_FILE_GRP_READ |	\
				QDF_FILE_OTH_READ)

#define DPT_DEBUGFS_NUMBER_BASE	10
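
/*
 * Note: assuming the QDF_FILE_* flags mirror the usual rwx permission bits,
 * DPT_DEBUGFS_PERMS corresponds to mode 0644 (user read/write, group and
 * others read), and all four dpt parameters below are parsed as base-10
 * integers per DPT_DEBUGFS_NUMBER_BASE.
 */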
/**
 * enum dpt_set_param_debugfs - dpt set params
 * @DPT_SET_PARAM_PROTO_BITMAP: set proto bitmap
 * @DPT_SET_PARAM_NR_RECORDS: set num of records
 * @DPT_SET_PARAM_VERBOSITY: set verbosity
 * @DPT_SET_PARAM_NUM_RECORDS_TO_DUMP: set num of records to dump
 * @DPT_SET_PARAM_MAX: max sentinel
 */
enum dpt_set_param_debugfs {
	DPT_SET_PARAM_PROTO_BITMAP = 1,
	DPT_SET_PARAM_NR_RECORDS = 2,
	DPT_SET_PARAM_VERBOSITY = 3,
	DPT_SET_PARAM_NUM_RECORDS_TO_DUMP = 4,
	DPT_SET_PARAM_MAX,
};

static void ol_vdev_rx_set_intrabss_fwd(struct cdp_soc_t *soc_hdl,
					uint8_t vdev_id, bool val);
uint32_t ol_txrx_get_tx_pending(struct cdp_pdev *pdev_handle);
extern void
ol_txrx_set_wmm_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
		      struct ol_tx_wmm_param_t wmm_param);

/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128

#ifdef DP_SUPPORT_RECOVERY_NOTIFY
static
int ol_peer_recovery_notifier_cb(struct notifier_block *block,
				 unsigned long state, void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct ol_txrx_peer_t *peer;
	struct peer_hang_data hang_data = {0};
	enum peer_debug_id_type dbg_id;

	if (!data || !block)
		return -EINVAL;

	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	peer = notif_block->priv_data;
	if (!peer)
		return -EINVAL;

	if (notif_data->offset + sizeof(struct peer_hang_data) >
			QDF_WLAN_HANG_FW_OFFSET)
		return NOTIFY_STOP_MASK;

	QDF_HANG_EVT_SET_HDR(&hang_data.tlv_header,
			     HANG_EVT_TAG_DP_PEER_INFO,
			     QDF_HANG_GET_STRUCT_TLVLEN(struct peer_hang_data));

	qdf_mem_copy(&hang_data.peer_mac_addr, &peer->mac_addr.raw,
		     QDF_MAC_ADDR_SIZE);

	for (dbg_id = 0; dbg_id < PEER_DEBUG_ID_MAX; dbg_id++)
		if (qdf_atomic_read(&peer->access_list[dbg_id]))
			hang_data.peer_timeout_bitmask |= (1 << dbg_id);

	qdf_mem_copy(notif_data->hang_data + notif_data->offset,
		     &hang_data, sizeof(struct peer_hang_data));
	notif_data->offset += sizeof(struct peer_hang_data);

	return 0;
}

static qdf_notif_block ol_peer_recovery_notifier = {
	.notif_block.notifier_call = ol_peer_recovery_notifier_cb,
};

static
QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer)
{
	ol_peer_recovery_notifier.priv_data = peer;

	return qdf_hang_event_register_notifier(&ol_peer_recovery_notifier);
}

static
QDF_STATUS ol_unregister_peer_recovery_notifier(void)
{
	return qdf_hang_event_unregister_notifier(&ol_peer_recovery_notifier);
}
#else
static inline
QDF_STATUS ol_register_peer_recovery_notifier(struct ol_txrx_peer_t *peer)
{
	return QDF_STATUS_SUCCESS;
}

static
QDF_STATUS ol_unregister_peer_recovery_notifier(void)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * ol_tx_mark_first_wakeup_packet() - set flag to indicate that
 *    the fw supports marking the first packet after wow wakeup
 * @soc_hdl: Datapath soc handle
 * @pdev_id: id of data path pdev handle
 * @value: 1 for enabled, 0 for disabled
 *
 * Return: None
 */
static void ol_tx_mark_first_wakeup_packet(struct cdp_soc_t *soc_hdl,
					   uint8_t pdev_id, uint8_t value)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);

	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	htt_mark_first_wakeup_packet(pdev->htt_pdev, value);
}

/**
 * ol_tx_set_is_mgmt_over_wmi_enabled() - set flag to indicate whether mgmt
 *                                        over wmi is enabled
 * @value: 1 for enabled, 0 for disabled
 *
 * Return: None
 */
void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	pdev->is_mgmt_over_wmi_enabled = value;
}

/**
 * ol_tx_get_is_mgmt_over_wmi_enabled() - get value of is_mgmt_over_wmi_enabled
 *
 * Return: is_mgmt_over_wmi_enabled
 */
uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return 0;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return 0;
	}

	return pdev->is_mgmt_over_wmi_enabled;
}

#ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
static void *
ol_txrx_find_peer_by_addr_and_vdev(struct cdp_pdev *ppdev,
				   struct cdp_vdev *pvdev, uint8_t *peer_addr)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_vdev_find_hash(pdev, vdev, peer_addr, 0, 1);
	if (!peer)
		return NULL;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}
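
/*
 * Note: the peer reference taken by ol_txrx_peer_vdev_find_hash() above is
 * released before returning, so the pointer handed back is only safe to use
 * as an existence check (see ol_txrx_find_peer_exist_on_vdev() below); it
 * must not be dereferenced without taking a fresh reference.
 */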

/**
 * ol_txrx_get_vdevid() - get the id of the vdev the peer registered with
 * @soc_hdl: data path soc handle
 * @peer_mac: peer mac address
 * @vdev_id: output for the virtual interface id the peer registered with
 *
 * Return: QDF_STATUS_SUCCESS if the peer was found,
 *         QDF_STATUS_E_FAILURE otherwise
 */
static QDF_STATUS ol_txrx_get_vdevid(struct cdp_soc_t *soc_hdl,
				     uint8_t *peer_mac, uint8_t *vdev_id)
{
	uint8_t pdev_id = OL_TXRX_PDEV_ID;
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
	struct ol_txrx_peer_t *peer =
		ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
						    PEER_DEBUG_ID_OL_INTERNAL);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "peer not found");
		return QDF_STATUS_E_FAILURE;
	}

	*vdev_id = peer->vdev->vdev_id;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

	return QDF_STATUS_SUCCESS;
}

ol_txrx_vdev_handle
ol_txrx_get_vdev_by_peer_addr(struct cdp_pdev *ppdev,
			      struct qdf_mac_addr peer_addr)
{
	struct ol_txrx_pdev_t *pdev = cdp_pdev_to_ol_txrx_pdev_t(ppdev);
	struct ol_txrx_peer_t *peer = NULL;
	ol_txrx_vdev_handle vdev;

	if (!pdev) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
			  QDF_MAC_ADDR_REF(peer_addr.bytes));
		return NULL;
	}

	peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr.bytes,
					    PEER_DEBUG_ID_OL_INTERNAL);

	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Peer not found for peer_addr: " QDF_MAC_ADDR_FMT,
			  QDF_MAC_ADDR_REF(peer_addr.bytes));
		return NULL;
	}

	vdev = peer->vdev;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);

	return vdev;
}

/**
 * ol_txrx_wrapper_get_vdev_by_peer_addr() - get vdev handle by peer mac address
 * @ppdev: data path device instance
 * @peer_addr: peer mac address
 *
 * Get virtual interface handle by local peer mac address
 *
 * Return: Virtual interface instance handle,
 *         NULL if it cannot be found
 */
static struct cdp_vdev *
ol_txrx_wrapper_get_vdev_by_peer_addr(struct cdp_pdev *ppdev,
				      struct qdf_mac_addr peer_addr)
{
	return (struct cdp_vdev *)ol_txrx_get_vdev_by_peer_addr(ppdev,
								peer_addr);
}

/**
 * ol_txrx_find_peer_exist() - find whether the peer already exists
 * @soc_hdl: datapath soc handle
 * @pdev_id: physical device instance id
 * @peer_addr: peer mac address
 *
 * Return: true or false
 */
static bool ol_txrx_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				    uint8_t *peer_addr)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);

	if (!pdev)
		return false;

	return !!ol_txrx_find_peer_by_addr(ol_txrx_pdev_t_to_cdp_pdev(pdev),
					   peer_addr);
}

/**
 * ol_txrx_find_peer_exist_on_vdev() - find whether a duplicate peer exists
 * on the given vdev
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev instance id
 * @peer_addr: peer mac address
 *
 * Return: true or false
 */
static bool ol_txrx_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id,
					    uint8_t *peer_addr)
{
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
								     vdev_id);

	if (!vdev)
		return false;

	return !!ol_txrx_find_peer_by_addr_and_vdev(
					ol_txrx_pdev_t_to_cdp_pdev(vdev->pdev),
					ol_txrx_vdev_t_to_cdp_vdev(vdev),
					peer_addr);
}

/**
 * ol_txrx_find_peer_exist_on_other_vdev() - find whether a duplicate peer
 * exists on any vdev other than the given one
 * @soc_hdl: datapath soc handle
 * @vdev_id: vdev instance id
 * @peer_addr: peer mac address
 * @max_bssid: max number of bssids
 *
 * Return: true or false
 */
static bool ol_txrx_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
						  uint8_t vdev_id,
						  uint8_t *peer_addr,
						  uint16_t max_bssid)
{
	int i;
	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
	struct ol_txrx_vdev_t *vdev;

	for (i = 0; i < max_bssid; i++) {
		vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc, i);
		/* Need to check vdevs other than the vdev_id */
		if (vdev_id == i || !vdev)
			continue;
		if (ol_txrx_find_peer_by_addr_and_vdev(
					ol_txrx_pdev_t_to_cdp_pdev(vdev->pdev),
					ol_txrx_vdev_t_to_cdp_vdev(vdev),
					peer_addr)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Duplicate peer "QDF_MAC_ADDR_FMT" already exists on vdev %d",
				  __func__, QDF_MAC_ADDR_REF(peer_addr), i);
			return true;
		}
	}

	return false;
}

/**
 * ol_txrx_find_peer_by_addr() - find peer via peer mac addr
 * @ppdev: pointer of type cdp_pdev
 * @peer_addr: peer mac addr
 *
 * This function finds a peer with the given mac address and returns a handle
 * to it. Note that this function does not increment the peer->ref_cnt, which
 * means the peer may be deleted in some other parallel context after it has
 * been found.
 *
 * Return: peer handle if peer is found, NULL if peer is not found.
 */
void *ol_txrx_find_peer_by_addr(struct cdp_pdev *ppdev,
				uint8_t *peer_addr)
{
	struct ol_txrx_peer_t *peer;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						   PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return NULL;
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	return peer;
}

/**
 * ol_txrx_peer_get_ref_by_addr() - get a peer reference via the peer mac addr
 * @pdev: pointer of type ol_txrx_pdev_handle
 * @peer_addr: peer mac addr
 * @dbg_id: debug id to track the caller
 *
 * This function finds the peer with the given mac address and returns a
 * handle to it, with peer->ref_cnt incremented. The increment guarantees
 * that the peer stays valid; it also means the caller must release the
 * reference with the corresponding API, ol_txrx_peer_release_ref.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
						 u8 *peer_addr,
						 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_addr, 0, 1,
						   dbg_id);
	if (!peer)
		return NULL;

	return peer;
}

/**
 * ol_txrx_peer_get_ref_by_local_id() - find a txrx peer handle from a peer's
 *                                      local ID
 * @ppdev: the data physical device object
 * @local_peer_id: the ID txrx assigned locally to the peer in question
 * @dbg_id: debug id to track the caller
 *
 * The control SW typically uses the txrx peer handle to refer to the peer.
 * In unusual circumstances, if it is infeasible for the control SW to
 * maintain the txrx peer handle but it can maintain a small integer local
 * peer ID, this function allows the peer handle to be retrieved based on
 * the local peer ID.
 *
 * Note that this function increments peer->ref_cnt, which guarantees that
 * the peer stays valid; it also means the caller must release the reference
 * with the corresponding API, ol_txrx_peer_release_ref.
 * Sample usage:
 *    {
 *      //the API call below increments the peer->ref_cnt
 *      peer = ol_txrx_peer_get_ref_by_local_id(pdev, local_peer_id, dbg_id);
 *
 *      // Once peer usage is done
 *
 *      //the API call below decrements the peer->ref_cnt
 *      ol_txrx_peer_release_ref(peer, dbg_id);
 *    }
 *
 * Return: peer handle if the peer is found, NULL if peer is not found.
 */
ol_txrx_peer_handle
ol_txrx_peer_get_ref_by_local_id(struct cdp_pdev *ppdev,
				 uint8_t local_peer_id,
				 enum peer_debug_id_type dbg_id)
{
	struct ol_txrx_peer_t *peer = NULL;
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;

	if ((local_peer_id == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (local_peer_id >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return NULL;
	}

	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	peer = pdev->local_peer_ids.map[local_peer_id];
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	if (peer && peer->valid)
		ol_txrx_peer_get_ref(peer, dbg_id);
	else
		peer = NULL;
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);

	return peer;
}
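
/*
 * Design note: unlike the mac-address lookups above, which search the peer
 * hash table, the local-ID lookup is a direct array index into
 * local_peer_ids.map[], so control-path callers that can carry the small
 * integer ID get an O(1) translation back to the peer handle.
 */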

static void ol_txrx_local_peer_id_pool_init(struct ol_txrx_pdev_t *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
}
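
/*
 * Illustration of the freelist encoding above, assuming a hypothetical
 * OL_TXRX_NUM_LOCAL_PEER_IDS of 4: after init, freelist = 0 and
 * pool[] = {1, 2, 3, 4, 4}, i.e. each slot stores the index of the next
 * free ID and the extra sentinel slot points to itself. Allocation pops
 * the head (ID 0, freelist becomes 1); freeing ID n pushes it back via
 * pool[n] = freelist; freelist = n. The pool is empty exactly when
 * pool[freelist] == freelist.
 */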

static void
ol_txrx_local_peer_id_alloc(struct ol_txrx_pdev_t *pdev,
			    struct ol_txrx_peer_t *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void
ol_txrx_local_peer_id_free(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}
	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

static void ol_txrx_local_peer_id_cleanup(struct ol_txrx_pdev_t *pdev)
{
	qdf_spinlock_destroy(&pdev->local_peer_ids.lock);
}

#else
#define ol_txrx_local_peer_id_pool_init(pdev)   /* no-op */
#define ol_txrx_local_peer_id_alloc(pdev, peer) /* no-op */
#define ol_txrx_local_peer_id_free(pdev, peer)  /* no-op */
#define ol_txrx_local_peer_id_cleanup(pdev)     /* no-op */
#endif

#if defined(CONFIG_DP_TRACE) && defined(WLAN_DEBUGFS)
/**
 * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
 * @file: file to read
 * @arg: pdev object
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
						void *arg)
{
	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
	uint32_t i = 0;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
		return QDF_STATUS_E_INVAL;
	else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
		return QDF_STATUS_SUCCESS;
	}

	i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
	status = qdf_dpt_dump_stats_debugfs(file, i);
	if (status == QDF_STATUS_E_FAILURE)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
	else if (status == QDF_STATUS_SUCCESS)
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;

	return status;
}

/**
 * ol_txrx_conv_str_to_int_debugfs() - convert string to int
 * @buf: buffer containing string
 * @len: buffer len
 * @proto_bitmap: defines the protocol to be tracked
 * @nr_records: defines the nth packet which is traced
 * @verbosity: defines the verbosity level
 * @num_records_to_dump: defines the number of records to dump
 *
 * This function expects the char buffer to be null terminated.
 * Otherwise the results could be unexpected values.
 *
 * Return: 0 on success
 */
static int ol_txrx_conv_str_to_int_debugfs(char *buf, qdf_size_t len,
					   int *proto_bitmap,
					   int *nr_records,
					   int *verbosity,
					   int *num_records_to_dump)
{
	int num_value = DPT_SET_PARAM_PROTO_BITMAP;
	int ret, param_value = 0;
	char *buf_param = buf;
	int i;

	for (i = 1; i < DPT_SET_PARAM_MAX; i++) {
		/* Loop till you reach a space, as kstrtoint operates up to
		 * the null character. Replace the space with a null character
		 * to read each value.
		 * Terminate the loop either at the null terminator or when
		 * len is 0.
		 */
		while (*buf && len) {
			if (*buf == ' ') {
				*buf = '\0';
				buf++;
				len--;
				break;
			}
			buf++;
			len--;
		}
		/* get the parameter */
		ret = qdf_kstrtoint(buf_param,
				    DPT_DEBUGFS_NUMBER_BASE,
				    &param_value);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				  QDF_TRACE_LEVEL_ERROR,
				  "%s: Error while parsing buffer. ret %d",
				  __func__, ret);
			return ret;
		}
		switch (num_value) {
		case DPT_SET_PARAM_PROTO_BITMAP:
			*proto_bitmap = param_value;
			break;
		case DPT_SET_PARAM_NR_RECORDS:
			*nr_records = param_value;
			break;
		case DPT_SET_PARAM_VERBOSITY:
			*verbosity = param_value;
			break;
		case DPT_SET_PARAM_NUM_RECORDS_TO_DUMP:
			if (param_value > MAX_QDF_DP_TRACE_RECORDS)
				param_value = MAX_QDF_DP_TRACE_RECORDS;
			*num_records_to_dump = param_value;
			break;
		default:
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "%s %d: Set command needs exactly 4 arguments in format <proto_bitmap> <number of record> <Verbosity> <number of records to dump>.",
				  __func__, __LINE__);
			break;
		}
		num_value++;
		/* buf_param should now point to the next param value. */
		buf_param = buf;
	}

	/* buf not yet at the null terminator implies more than 4 params. */
	if (*buf) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s %d: Set command needs exactly 4 arguments in format <proto_bitmap> <number of record> <Verbosity> <number of records to dump>.",
			  __func__, __LINE__);
		return -EINVAL;
	}
	return 0;
}
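
/*
 * Worked example for the parser above (input values are arbitrary): for a
 * buffer containing "4095 1 2 100", each pass of the outer loop replaces
 * one space with '\0' and runs qdf_kstrtoint() (base 10, per
 * DPT_DEBUGFS_NUMBER_BASE) on the token, yielding proto_bitmap = 4095,
 * nr_records = 1, verbosity = 2 and num_records_to_dump = 100, the last
 * clamped to MAX_QDF_DP_TRACE_RECORDS.
 */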

/**
 * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
 * @priv: pdev object
 * @buf: buff to get value for dpt parameters
 * @len: buf length
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
						 const char *buf,
						 qdf_size_t len)
{
	int ret;
	int proto_bitmap = 0;
	int nr_records = 0;
	int verbosity = 0;
	int num_records_to_dump = 0;
	char *buf1 = NULL;

	if (!buf || !len) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: null buffer or len. len %u",
			  __func__, (uint8_t)len);
		return QDF_STATUS_E_FAULT;
	}

	buf1 = (char *)qdf_mem_malloc(len);
	if (!buf1)
		return QDF_STATUS_E_FAULT;

	qdf_mem_copy(buf1, buf, len);
	ret = ol_txrx_conv_str_to_int_debugfs(buf1, len, &proto_bitmap,
					      &nr_records, &verbosity,
					      &num_records_to_dump);
	if (ret) {
		qdf_mem_free(buf1);
		return QDF_STATUS_E_INVAL;
	}

	qdf_dpt_set_value_debugfs(proto_bitmap, nr_records, verbosity,
				  num_records_to_dump);
	qdf_mem_free(buf1);
	return QDF_STATUS_SUCCESS;
}
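
/*
 * Example usage of the write handler above (the path is illustrative,
 * assuming debugfs is mounted at /sys/kernel/debug and the dpt_stats dir is
 * created in ol_txrx_debugfs_init() below):
 *
 *   echo "4095 1 2 100" > /sys/kernel/debug/dpt_stats/dump_set_dpt_logs
 *   cat /sys/kernel/debug/dpt_stats/dump_set_dpt_logs
 *
 * The write programs <proto_bitmap> <nr_records> <verbosity>
 * <num_records_to_dump>; the read streams the dp trace records back via
 * ol_txrx_read_dpt_buff_debugfs().
 */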

static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
	pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
	pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
	pdev->dpt_debugfs_fops.priv = pdev;

	pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);

	if (!pdev->dpt_stats_log_dir) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: error while creating debugfs dir for %s",
			  __func__, "dpt_stats");
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
		return -EBUSY;
	}

	if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
				     pdev->dpt_stats_log_dir,
				     &pdev->dpt_debugfs_fops)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: debugfs entry creation failed!",
			  __func__);
		pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
		return -EBUSY;
	}

	pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
	return 0;
}

static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
	qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
}
#else
static inline int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
{
	return 0;
}

static inline void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
{
}
#endif

/**
 * ol_txrx_pdev_attach() - allocate txrx pdev
 * @soc: datapath soc handle
 * @params: pdev attach params, including the pdev id, HTC handle and os dev
 *
 * Return: QDF_STATUS_SUCCESS on success,
 *	   QDF error code for failure
 */
static QDF_STATUS
ol_txrx_pdev_attach(ol_txrx_soc_handle soc,
		    struct cdp_pdev_attach_params *params)
{
	struct ol_txrx_soc_t *ol_soc = cdp_soc_t_to_ol_txrx_soc_t(soc);
	struct ol_txrx_pdev_t *pdev;
	struct cdp_cfg *cfg_pdev = cds_get_context(QDF_MODULE_ID_CFG);
	QDF_STATUS status;
	int i, tid;

	if (params->pdev_id == OL_TXRX_INVALID_PDEV_ID)
		return QDF_STATUS_E_INVAL;

	pdev = qdf_mem_malloc(sizeof(*pdev));
	if (!pdev) {
		status = QDF_STATUS_E_NOMEM;
		goto fail0;
	}

	/* init LL/HL cfg here */
	pdev->cfg.is_high_latency = ol_cfg_is_high_latency(cfg_pdev);
	/*
	 * Credit reporting through HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND
	 * enabled or not.
	 */
	pdev->cfg.credit_update_enabled =
		ol_cfg_is_credit_update_enabled(cfg_pdev);

	/* Explicitly request TX Completions from FW */
	pdev->cfg.request_tx_comp = cds_is_ptp_rx_opt_enabled() ||
		cds_is_packet_log_enabled();

	pdev->cfg.default_tx_comp_req = !ol_cfg_tx_free_at_download(cfg_pdev);

	/* store provided params */
	pdev->ctrl_pdev = cfg_pdev;
	pdev->osdev = params->qdf_osdev;
	pdev->id = params->pdev_id;
	pdev->soc = ol_soc;
	ol_soc->pdev_list[params->pdev_id] = pdev;

	for (i = 0; i < htt_num_sec_types; i++)
		pdev->sec_types[i] = (enum ol_sec_type)i;

	TXRX_STATS_INIT(pdev);
	ol_txrx_tso_stats_init(pdev);
	ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);

	TAILQ_INIT(&pdev->vdev_list);

	TAILQ_INIT(&pdev->inactive_peer_list);

	TAILQ_INIT(&pdev->req_list);
	pdev->req_list_depth = 0;
	qdf_spinlock_create(&pdev->req_list_spinlock);
	qdf_spinlock_create(&pdev->tx_mutex);

	/* do initial set up of the peer ID -> peer object lookup map */
	if (ol_txrx_peer_find_attach(pdev)) {
		status = QDF_STATUS_E_FAILURE;
		goto fail1;
	}

	/* initialize the counter of the target's tx buffer availability */
	qdf_atomic_init(&pdev->target_tx_credit);
	qdf_atomic_init(&pdev->orig_target_tx_credit);
	qdf_atomic_init(&pdev->pad_reserve_tx_credit);
	qdf_atomic_add(1, &pdev->pad_reserve_tx_credit);

	if (ol_cfg_is_high_latency(cfg_pdev)) {
		qdf_spinlock_create(&pdev->tx_queue_spinlock);
		pdev->tx_sched.scheduler = ol_tx_sched_attach(pdev);
		if (!pdev->tx_sched.scheduler) {
			status = QDF_STATUS_E_FAILURE;
			goto fail2;
		}
	}
	ol_txrx_pdev_txq_log_init(pdev);
	ol_txrx_pdev_grp_stats_init(pdev);

	pdev->htt_pdev =
		htt_pdev_alloc(pdev, cfg_pdev,
			       params->htc_handle, params->qdf_osdev);
	if (!pdev->htt_pdev) {
		status = QDF_STATUS_E_FAILURE;
		goto fail3;
	}

	htt_register_rx_pkt_dump_callback(pdev->htt_pdev,
					  ol_rx_pkt_dump_call);

	/*
	 * Init the tid --> category table.
	 * Regular tids (0-15) map to their AC.
	 * Extension tids get their own categories.
	 */
	for (tid = 0; tid < OL_TX_NUM_QOS_TIDS; tid++) {
		int ac = TXRX_TID_TO_WMM_AC(tid);

		pdev->tid_to_ac[tid] = ac;
	}
	pdev->tid_to_ac[OL_TX_NON_QOS_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_NON_QOS_DATA;
	pdev->tid_to_ac[OL_TX_MGMT_TID] =
		OL_TX_SCHED_WRR_ADV_CAT_UCAST_MGMT;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_MCAST_BCAST] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_DATA;
	pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
		OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;
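
	/*
	 * For reference, under the usual 802.11 TID-to-AC convention (which
	 * TXRX_TID_TO_WMM_AC is assumed to follow): TIDs 0 and 3 map to best
	 * effort, 1 and 2 to background, 4 and 5 to video, and 6 and 7 to
	 * voice; the four assignments above then give the non-QoS,
	 * unicast-mgmt, mcast-data and mcast-mgmt extension TIDs their own
	 * scheduler categories.
	 */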

	if (ol_cfg_is_flow_steering_enabled(pdev->ctrl_pdev))
		pdev->peer_id_unmap_ref_cnt =
			TXRX_RFS_ENABLE_PEER_ID_UNMAP_COUNT;
	else
		pdev->peer_id_unmap_ref_cnt =
			TXRX_RFS_DISABLE_PEER_ID_UNMAP_COUNT;

	if (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE)
		pdev->chan_noise_floor = NORMALIZED_TO_NOISE_FLOOR;

	ol_txrx_debugfs_init(pdev);

	return QDF_STATUS_SUCCESS;

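/*
 * Error unwind: each label below tears down only what was set up before the
 * corresponding failure point, in reverse order of setup, and fail1 finally
 * frees the pdev itself.
 */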
fail3:
	ol_txrx_peer_find_detach(pdev);

fail2:
	if (ol_cfg_is_high_latency(cfg_pdev))
		qdf_spinlock_destroy(&pdev->tx_queue_spinlock);

fail1:
	qdf_spinlock_destroy(&pdev->req_list_spinlock);
	qdf_spinlock_destroy(&pdev->tx_mutex);
	ol_txrx_tso_stats_deinit(pdev);
	ol_txrx_fw_stats_desc_pool_deinit(pdev);
	qdf_mem_free(pdev);

fail0:
	return status;
}
1000*5113495bSYour Name 
1001*5113495bSYour Name #if !defined(REMOVE_PKT_LOG) && !defined(QVIT)
1002*5113495bSYour Name /**
1003*5113495bSYour Name  * htt_pkt_log_init() - API to initialize packet log
1004*5113495bSYour Name  * @soc_hdl: Datapath soc handle
1005*5113495bSYour Name  * @pdev_id: id of data path pdev handle
1006*5113495bSYour Name  * @scn: HIF context
1007*5113495bSYour Name  *
1008*5113495bSYour Name  * Return: void
1009*5113495bSYour Name  */
htt_pkt_log_init(struct cdp_soc_t * soc_hdl,uint8_t pdev_id,void * scn)1010*5113495bSYour Name void htt_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev_id, void *scn)
1011*5113495bSYour Name {
1012*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1013*5113495bSYour Name 	ol_txrx_pdev_handle handle =
1014*5113495bSYour Name 				ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1015*5113495bSYour Name 
1016*5113495bSYour Name 	if (handle->pkt_log_init) {
1017*5113495bSYour Name 		ol_txrx_err("pktlog already initialized");
1018*5113495bSYour Name 		return;
1019*5113495bSYour Name 	}
1020*5113495bSYour Name 
1021*5113495bSYour Name 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
1022*5113495bSYour Name 			!QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
1023*5113495bSYour Name 		pktlog_sethandle(&handle->pl_dev, scn);
1024*5113495bSYour Name 		pktlog_set_pdev_id(handle->pl_dev, pdev_id);
1025*5113495bSYour Name 		pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
1026*5113495bSYour Name 		if (pktlogmod_init(scn))
1027*5113495bSYour Name 			qdf_print(" pktlogmod_init failed");
1028*5113495bSYour Name 		else
1029*5113495bSYour Name 			handle->pkt_log_init = true;
1030*5113495bSYour Name 	} else {
1031*5113495bSYour Name 		ol_txrx_err("Invalid conn mode: %d", cds_get_conparam());
1032*5113495bSYour Name 	}
1033*5113495bSYour Name }
1034*5113495bSYour Name 
1035*5113495bSYour Name /**
1036*5113495bSYour Name  * htt_pktlogmod_exit() - API to cleanup pktlog info
1037*5113495bSYour Name  * @handle: Pdev handle
1038*5113495bSYour Name  * @scn: HIF Context
1039*5113495bSYour Name  *
1040*5113495bSYour Name  * Return: void
1041*5113495bSYour Name  */
htt_pktlogmod_exit(struct ol_txrx_pdev_t * handle)1042*5113495bSYour Name static void htt_pktlogmod_exit(struct ol_txrx_pdev_t *handle)
1043*5113495bSYour Name {
1044*5113495bSYour Name 	if (!handle->pkt_log_init) {
1045*5113495bSYour Name 		ol_txrx_err("pktlog is not initialized");
1046*5113495bSYour Name 		return;
1047*5113495bSYour Name 	}
1048*5113495bSYour Name 
1049*5113495bSYour Name 	if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE &&
1050*5113495bSYour Name 		!QDF_IS_EPPING_ENABLED(cds_get_conparam())) {
1051*5113495bSYour Name 		pktlogmod_exit(handle);
1052*5113495bSYour Name 		handle->pkt_log_init = false;
1053*5113495bSYour Name 	} else {
1054*5113495bSYour Name 		ol_txrx_err("Invalid conn mode: %d", cds_get_conparam());
1055*5113495bSYour Name 	}
1056*5113495bSYour Name }
1057*5113495bSYour Name 
1058*5113495bSYour Name #else
1059*5113495bSYour Name void htt_pkt_log_init(struct cdp_soc_t *soc_hdl, uint8_t pdev, void *scn) { }
1060*5113495bSYour Name static void htt_pktlogmod_exit(ol_txrx_pdev_handle handle) { }
1061*5113495bSYour Name #endif
1062*5113495bSYour Name 
1063*5113495bSYour Name #ifdef QCA_LL_PDEV_TX_FLOW_CONTROL
1064*5113495bSYour Name /**
1065*5113495bSYour Name  * ol_txrx_pdev_set_threshold() - set pdev pool stop/start threshold
1066*5113495bSYour Name  * @pdev: txrx pdev
1067*5113495bSYour Name  *
1068*5113495bSYour Name  * Return: void
1069*5113495bSYour Name  */
1070*5113495bSYour Name static void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
1071*5113495bSYour Name {
1072*5113495bSYour Name 	uint32_t stop_threshold;
1073*5113495bSYour Name 	uint32_t start_threshold;
1074*5113495bSYour Name 	uint16_t desc_pool_size = pdev->tx_desc.pool_size;
1075*5113495bSYour Name 
1076*5113495bSYour Name 	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
1077*5113495bSYour Name 	start_threshold = stop_threshold +
1078*5113495bSYour Name 		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
1079*5113495bSYour Name 	pdev->tx_desc.start_th = (start_threshold * desc_pool_size) / 100;
1080*5113495bSYour Name 	pdev->tx_desc.stop_th = (stop_threshold * desc_pool_size) / 100;
1081*5113495bSYour Name 	pdev->tx_desc.stop_priority_th =
1082*5113495bSYour Name 		(TX_PRIORITY_TH * pdev->tx_desc.stop_th) / 100;
1083*5113495bSYour Name 	if (pdev->tx_desc.stop_priority_th >= MAX_TSO_SEGMENT_DESC)
1084*5113495bSYour Name 		pdev->tx_desc.stop_priority_th -= MAX_TSO_SEGMENT_DESC;
1085*5113495bSYour Name 
1086*5113495bSYour Name 	pdev->tx_desc.start_priority_th =
1087*5113495bSYour Name 		(TX_PRIORITY_TH * pdev->tx_desc.start_th) / 100;
1088*5113495bSYour Name 	if (pdev->tx_desc.start_priority_th >= MAX_TSO_SEGMENT_DESC)
1089*5113495bSYour Name 		pdev->tx_desc.start_priority_th -= MAX_TSO_SEGMENT_DESC;
1090*5113495bSYour Name 	pdev->tx_desc.status = FLOW_POOL_ACTIVE_UNPAUSED;
1091*5113495bSYour Name }
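
/*
 * Worked example (illustrative numbers only, not defaults from any
 * ini/cfg): assume desc_pool_size = 1024, a stop threshold of 15%,
 * and a start-queue offset of 10%, giving start_threshold = 25%:
 *   start_th = (25 * 1024) / 100 = 256 descriptors
 *   stop_th  = (15 * 1024) / 100 = 153 descriptors
 * If TX_PRIORITY_TH were 80 (hypothetical value), the priority
 * thresholds before the MAX_TSO_SEGMENT_DESC adjustment would be
 *   stop_priority_th  = (80 * 153) / 100 = 122
 *   start_priority_th = (80 * 256) / 100 = 204
 * i.e. the pool pauses the tx queues when the count of free
 * descriptors falls to stop_th and resumes them at start_th.
 */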
1092*5113495bSYour Name #else
1093*5113495bSYour Name static inline void ol_txrx_pdev_set_threshold(struct ol_txrx_pdev_t *pdev)
1094*5113495bSYour Name {
1095*5113495bSYour Name }
1096*5113495bSYour Name #endif
1097*5113495bSYour Name 
1098*5113495bSYour Name /**
1099*5113495bSYour Name  * ol_txrx_pdev_post_attach() - attach txrx pdev
1100*5113495bSYour Name  * @soc_hdl: datapath soc handle
1101*5113495bSYour Name  * @pdev_id: physical device instance id
1102*5113495bSYour Name  *
1103*5113495bSYour Name  * Return: 0 for success
1104*5113495bSYour Name  */
1105*5113495bSYour Name int
1106*5113495bSYour Name ol_txrx_pdev_post_attach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1107*5113495bSYour Name {
1108*5113495bSYour Name 	uint16_t i;
1109*5113495bSYour Name 	uint16_t fail_idx = 0;
1110*5113495bSYour Name 	int ret = 0;
1111*5113495bSYour Name 	uint16_t desc_pool_size;
1112*5113495bSYour Name 	struct hif_opaque_softc *osc = cds_get_context(QDF_MODULE_ID_HIF);
1113*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1114*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1115*5113495bSYour Name 
1116*5113495bSYour Name 	uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
1117*5113495bSYour Name 	union ol_tx_desc_list_elem_t *c_element;
1118*5113495bSYour Name 	unsigned int sig_bit;
1119*5113495bSYour Name 	uint16_t desc_per_page;
1120*5113495bSYour Name 
1121*5113495bSYour Name 	if (!osc || !pdev) {
1122*5113495bSYour Name 		ret = -EINVAL;
1123*5113495bSYour Name 		goto ol_attach_fail;
1124*5113495bSYour Name 	}
1125*5113495bSYour Name 
1126*5113495bSYour Name 	/*
1127*5113495bSYour Name 	 * For LL, limit the number of host's tx descriptors to match
1128*5113495bSYour Name 	 * the number of target FW tx descriptors.
1129*5113495bSYour Name 	 * This simplifies the FW, by ensuring the host will never
1130*5113495bSYour Name 	 * download more tx descriptors than the target has space for.
1131*5113495bSYour Name 	 * The FW will drop/free low-priority tx descriptors when it
1132*5113495bSYour Name 	 * starts to run low, so that in theory the host should never
1133*5113495bSYour Name 	 * run out of tx descriptors.
1134*5113495bSYour Name 	 */
1135*5113495bSYour Name 
1136*5113495bSYour Name 	/*
1137*5113495bSYour Name 	 * LL - initialize the target credit ourselves.
1138*5113495bSYour Name 	 * HL - wait for a HTT target credit initialization
1139*5113495bSYour Name 	 * during htt_attach.
1140*5113495bSYour Name 	 */
1141*5113495bSYour Name 	desc_pool_size = ol_tx_get_desc_global_pool_size(pdev);
1142*5113495bSYour Name 	ol_tx_init_pdev(pdev);
1143*5113495bSYour Name 
1144*5113495bSYour Name 	ol_tx_desc_dup_detect_init(pdev, desc_pool_size);
1145*5113495bSYour Name 
1146*5113495bSYour Name 	ol_tx_setup_fastpath_ce_handles(osc, pdev);
1147*5113495bSYour Name 
1148*5113495bSYour Name 	if (ol_txrx_get_new_htt_msg_format(pdev))
1149*5113495bSYour Name 		ol_set_cfg_new_htt_format(pdev->ctrl_pdev, true);
1150*5113495bSYour Name 	else
1151*5113495bSYour Name 		ol_set_cfg_new_htt_format(pdev->ctrl_pdev, false);
1152*5113495bSYour Name 
1153*5113495bSYour Name 	ret = htt_attach(pdev->htt_pdev, desc_pool_size);
1154*5113495bSYour Name 	if (ret)
1155*5113495bSYour Name 		goto htt_attach_fail;
1156*5113495bSYour Name 
1157*5113495bSYour Name 	/* Attach micro controller data path offload resource */
1158*5113495bSYour Name 	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev)) {
1159*5113495bSYour Name 		ret = htt_ipa_uc_attach(pdev->htt_pdev);
1160*5113495bSYour Name 		if (ret)
1161*5113495bSYour Name 			goto uc_attach_fail;
1162*5113495bSYour Name 	}
1163*5113495bSYour Name 
1164*5113495bSYour Name 	/* Round the single-element reserved size up to a power of 2 */
1165*5113495bSYour Name 	pdev->tx_desc.desc_reserved_size = qdf_get_pwr2(desc_element_size);
1166*5113495bSYour Name 	qdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
1167*5113495bSYour Name 		pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
1168*5113495bSYour Name 	if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
1169*5113495bSYour Name 		(!pdev->tx_desc.desc_pages.cacheable_pages)) {
1170*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1171*5113495bSYour Name 			"Page alloc fail");
1172*5113495bSYour Name 		ret = -ENOMEM;
1173*5113495bSYour Name 		goto page_alloc_fail;
1174*5113495bSYour Name 	}
1175*5113495bSYour Name 	desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
1176*5113495bSYour Name 	pdev->tx_desc.offset_filter = desc_per_page - 1;
1177*5113495bSYour Name 	/* Calculate page divider to find page number */
1178*5113495bSYour Name 	sig_bit = 0;
1179*5113495bSYour Name 	while (desc_per_page) {
1180*5113495bSYour Name 		sig_bit++;
1181*5113495bSYour Name 		desc_per_page = desc_per_page >> 1;
1182*5113495bSYour Name 	}
1183*5113495bSYour Name 	pdev->tx_desc.page_divider = (sig_bit - 1);
1184*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
1185*5113495bSYour Name 		"page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
1186*5113495bSYour Name 		pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
1187*5113495bSYour Name 		desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
1188*5113495bSYour Name 		pdev->tx_desc.desc_pages.num_element_per_page);
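
	/*
	 * Sketch of the id -> (page, offset) mapping these fields enable,
	 * with illustrative numbers: if num_element_per_page = 64, the
	 * loop above ends with sig_bit = 7, so page_divider = 6 and
	 * offset_filter = 0x3f.  A descriptor id then resolves as
	 *   page   = id >> page_divider;   e.g. 200 >> 6   = page 3
	 *   offset = id & offset_filter;   e.g. 200 & 0x3f = offset 8
	 * which presumably lets ol_tx_desc_find() locate a descriptor
	 * with a shift and a mask instead of a division.
	 */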
1189*5113495bSYour Name 
1190*5113495bSYour Name 	/*
1191*5113495bSYour Name 	 * Each SW tx desc (used only within the tx datapath SW) has a
1192*5113495bSYour Name 	 * matching HTT tx desc (used for downloading tx meta-data to FW/HW).
1193*5113495bSYour Name 	 * Go ahead and allocate the HTT tx desc and link it with the SW tx
1194*5113495bSYour Name 	 * desc now, to avoid doing it during time-critical transmit.
1195*5113495bSYour Name 	 */
1196*5113495bSYour Name 	pdev->tx_desc.pool_size = desc_pool_size;
1197*5113495bSYour Name 	pdev->tx_desc.freelist =
1198*5113495bSYour Name 		(union ol_tx_desc_list_elem_t *)
1199*5113495bSYour Name 		(*pdev->tx_desc.desc_pages.cacheable_pages);
1200*5113495bSYour Name 	c_element = pdev->tx_desc.freelist;
1201*5113495bSYour Name 	for (i = 0; i < desc_pool_size; i++) {
1202*5113495bSYour Name 		void *htt_tx_desc;
1203*5113495bSYour Name 		void *htt_frag_desc = NULL;
1204*5113495bSYour Name 		qdf_dma_addr_t frag_paddr = 0;
1205*5113495bSYour Name 		qdf_dma_addr_t paddr;
1206*5113495bSYour Name 
1207*5113495bSYour Name 		if (i == (desc_pool_size - 1))
1208*5113495bSYour Name 			c_element->next = NULL;
1209*5113495bSYour Name 		else
1210*5113495bSYour Name 			c_element->next = (union ol_tx_desc_list_elem_t *)
1211*5113495bSYour Name 				ol_tx_desc_find(pdev, i + 1);
1212*5113495bSYour Name 
1213*5113495bSYour Name 		htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr, i);
1214*5113495bSYour Name 		if (!htt_tx_desc) {
1215*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
1216*5113495bSYour Name 				  "%s: failed to alloc HTT tx desc (%d of %d)",
1217*5113495bSYour Name 				__func__, i, desc_pool_size);
1218*5113495bSYour Name 			fail_idx = i;
1219*5113495bSYour Name 			ret = -ENOMEM;
1220*5113495bSYour Name 			goto desc_alloc_fail;
1221*5113495bSYour Name 		}
1222*5113495bSYour Name 
1223*5113495bSYour Name 		c_element->tx_desc.htt_tx_desc = htt_tx_desc;
1224*5113495bSYour Name 		c_element->tx_desc.htt_tx_desc_paddr = paddr;
1225*5113495bSYour Name 		ret = htt_tx_frag_alloc(pdev->htt_pdev,
1226*5113495bSYour Name 					i, &frag_paddr, &htt_frag_desc);
1227*5113495bSYour Name 		if (ret) {
1228*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1229*5113495bSYour Name 				"%s: failed to alloc HTT frag desc (%d/%d)",
1230*5113495bSYour Name 				__func__, i, desc_pool_size);
1231*5113495bSYour Name 			/*
			 * TODO: verify this error path does not leak the
			 * HTT tx desc allocated above for this index
			 */
1232*5113495bSYour Name 			fail_idx = i;
1233*5113495bSYour Name 			goto desc_alloc_fail;
1234*5113495bSYour Name 		}
1235*5113495bSYour Name 		if (!ret && htt_frag_desc) {
1236*5113495bSYour Name 			/*
1237*5113495bSYour Name 			 * Initialize the first 6 words (TSO flags)
1238*5113495bSYour Name 			 * of the frag descriptor
1239*5113495bSYour Name 			 */
1240*5113495bSYour Name 			memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
1241*5113495bSYour Name 			c_element->tx_desc.htt_frag_desc = htt_frag_desc;
1242*5113495bSYour Name 			c_element->tx_desc.htt_frag_desc_paddr = frag_paddr;
1243*5113495bSYour Name 		}
1244*5113495bSYour Name #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
1245*5113495bSYour Name 		c_element->tx_desc.pkt_type = 0xff;
1246*5113495bSYour Name #ifdef QCA_COMPUTE_TX_DELAY
1247*5113495bSYour Name 		c_element->tx_desc.entry_timestamp_ticks =
1248*5113495bSYour Name 			0xffffffff;
1249*5113495bSYour Name #endif
1250*5113495bSYour Name #endif
1251*5113495bSYour Name 		c_element->tx_desc.id = i;
1252*5113495bSYour Name 		qdf_atomic_init(&c_element->tx_desc.ref_cnt);
1253*5113495bSYour Name 		c_element = c_element->next;
1254*5113495bSYour Name 		fail_idx = i;
1255*5113495bSYour Name 	}
1256*5113495bSYour Name 
1257*5113495bSYour Name 	/* link SW tx descs into a freelist */
1258*5113495bSYour Name 	pdev->tx_desc.num_free = desc_pool_size;
1259*5113495bSYour Name 	ol_txrx_dbg("first tx_desc:0x%pK Last tx desc:0x%pK",
1260*5113495bSYour Name 		    (uint32_t *)pdev->tx_desc.freelist,
1261*5113495bSYour Name 		    (uint32_t *)(pdev->tx_desc.freelist + desc_pool_size));
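
	/*
	 * At this point the pool is a singly linked freelist threaded
	 * through the descriptor array itself:
	 *   freelist -> elem[0] -> elem[1] -> ... -> elem[pool_size-1] -> NULL
	 * so a descriptor allocation can simply pop the head and a free
	 * can push it back, with no separate bookkeeping structure
	 * (a sketch of the intent; the alloc/free paths live elsewhere).
	 */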
1262*5113495bSYour Name 
1263*5113495bSYour Name 	ol_txrx_pdev_set_threshold(pdev);
1264*5113495bSYour Name 
1265*5113495bSYour Name 	/* check what format of frames is expected to be delivered by the OS */
1266*5113495bSYour Name 	pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
1267*5113495bSYour Name 	if (pdev->frame_format == wlan_frm_fmt_native_wifi)
1268*5113495bSYour Name 		pdev->htt_pkt_type = htt_pkt_type_native_wifi;
1269*5113495bSYour Name 	else if (pdev->frame_format == wlan_frm_fmt_802_3) {
1270*5113495bSYour Name 		if (ol_cfg_is_ce_classify_enabled(pdev->ctrl_pdev))
1271*5113495bSYour Name 			pdev->htt_pkt_type = htt_pkt_type_eth2;
1272*5113495bSYour Name 		else
1273*5113495bSYour Name 			pdev->htt_pkt_type = htt_pkt_type_ethernet;
1274*5113495bSYour Name 	} else {
1275*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1276*5113495bSYour Name 			  "%s Invalid standard frame type: %d",
1277*5113495bSYour Name 			  __func__, pdev->frame_format);
1278*5113495bSYour Name 		ret = -EINVAL;
1279*5113495bSYour Name 		goto control_init_fail;
1280*5113495bSYour Name 	}
1281*5113495bSYour Name 
1282*5113495bSYour Name 	/* setup the global rx defrag waitlist */
1283*5113495bSYour Name 	TAILQ_INIT(&pdev->rx.defrag.waitlist);
1284*5113495bSYour Name 
1285*5113495bSYour Name 	/* configure where defrag timeout and duplicate detection is handled */
1286*5113495bSYour Name 	pdev->rx.flags.defrag_timeout_check =
1287*5113495bSYour Name 		pdev->rx.flags.dup_check =
1288*5113495bSYour Name 		ol_cfg_rx_host_defrag_timeout_duplicate_check(pdev->ctrl_pdev);
1289*5113495bSYour Name 
1290*5113495bSYour Name #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
1291*5113495bSYour Name 	/* Need to revisit this part. Currently, hardcoded to Riva's caps. */
1292*5113495bSYour Name 	pdev->target_tx_tran_caps = wlan_frm_tran_cap_raw;
1293*5113495bSYour Name 	pdev->target_rx_tran_caps = wlan_frm_tran_cap_raw;
1294*5113495bSYour Name 	/*
1295*5113495bSYour Name 	 * The Riva HW de-aggregation doesn't have the capability to generate
1296*5113495bSYour Name 	 * the 802.11 header for non-first subframes of an A-MSDU.
1297*5113495bSYour Name 	 */
1298*5113495bSYour Name 	pdev->sw_subfrm_hdr_recovery_enable = 1;
1299*5113495bSYour Name 	/*
1300*5113495bSYour Name 	 * The Riva HW doesn't have the capability to set the Protected Frame
1301*5113495bSYour Name 	 * bit in the MAC header for encrypted data frames.
1302*5113495bSYour Name 	 */
1303*5113495bSYour Name 	pdev->sw_pf_proc_enable = 1;
1304*5113495bSYour Name 
1305*5113495bSYour Name 	if (pdev->frame_format == wlan_frm_fmt_802_3) {
1306*5113495bSYour Name 		/*
1307*5113495bSYour Name 		 * sw llc process is only needed in
1308*5113495bSYour Name 		 * 802.3 to 802.11 transform case
1309*5113495bSYour Name 		 */
1310*5113495bSYour Name 		pdev->sw_tx_llc_proc_enable = 1;
1311*5113495bSYour Name 		pdev->sw_rx_llc_proc_enable = 1;
1312*5113495bSYour Name 	} else {
1313*5113495bSYour Name 		pdev->sw_tx_llc_proc_enable = 0;
1314*5113495bSYour Name 		pdev->sw_rx_llc_proc_enable = 0;
1315*5113495bSYour Name 	}
1316*5113495bSYour Name 
1317*5113495bSYour Name 	switch (pdev->frame_format) {
1318*5113495bSYour Name 	case wlan_frm_fmt_raw:
1319*5113495bSYour Name 		pdev->sw_tx_encap =
1320*5113495bSYour Name 			pdev->target_tx_tran_caps & wlan_frm_tran_cap_raw
1321*5113495bSYour Name 			? 0 : 1;
1322*5113495bSYour Name 		pdev->sw_rx_decap =
1323*5113495bSYour Name 			pdev->target_rx_tran_caps & wlan_frm_tran_cap_raw
1324*5113495bSYour Name 			? 0 : 1;
1325*5113495bSYour Name 		break;
1326*5113495bSYour Name 	case wlan_frm_fmt_native_wifi:
1327*5113495bSYour Name 		pdev->sw_tx_encap =
1328*5113495bSYour Name 			pdev->target_tx_tran_caps & wlan_frm_tran_cap_native_wifi
1329*5113495bSYour Name 			? 0 : 1;
1331*5113495bSYour Name 		pdev->sw_rx_decap =
1332*5113495bSYour Name 			pdev->target_rx_tran_caps & wlan_frm_tran_cap_native_wifi
1333*5113495bSYour Name 			? 0 : 1;
1335*5113495bSYour Name 		break;
1336*5113495bSYour Name 	case wlan_frm_fmt_802_3:
1337*5113495bSYour Name 		pdev->sw_tx_encap =
1338*5113495bSYour Name 			pdev->target_tx_tran_caps & wlan_frm_tran_cap_8023
1339*5113495bSYour Name 			? 0 : 1;
1340*5113495bSYour Name 		pdev->sw_rx_decap =
1341*5113495bSYour Name 			pdev->target_rx_tran_caps & wlan_frm_tran_cap_8023
1342*5113495bSYour Name 			? 0 : 1;
1343*5113495bSYour Name 		break;
1344*5113495bSYour Name 	default:
1345*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
1346*5113495bSYour Name 			  "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
1347*5113495bSYour Name 			  pdev->frame_format,
1348*5113495bSYour Name 			  pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
1349*5113495bSYour Name 		ret = -EINVAL;
1350*5113495bSYour Name 		goto control_init_fail;
1351*5113495bSYour Name 	}
1352*5113495bSYour Name #endif
1353*5113495bSYour Name 
1354*5113495bSYour Name 	/*
1355*5113495bSYour Name 	 * Determine what rx processing steps are done within the host.
1356*5113495bSYour Name 	 * Possibilities:
1357*5113495bSYour Name 	 * 1.  Nothing - rx->tx forwarding and rx PN entirely within target.
1358*5113495bSYour Name 	 *     (This is unlikely; even if the target is doing rx->tx forwarding,
1359*5113495bSYour Name 	 *     the host should be doing rx->tx forwarding too, as a back up for
1360*5113495bSYour Name 	 *     the target's rx->tx forwarding, in case the target runs short on
1361*5113495bSYour Name 	 *     memory, and can't store rx->tx frames that are waiting for
1362*5113495bSYour Name 	 *     missing prior rx frames to arrive.)
1363*5113495bSYour Name 	 * 2.  Just rx -> tx forwarding.
1364*5113495bSYour Name 	 *     This is the typical configuration for HL, and a likely
1365*5113495bSYour Name 	 *     configuration for LL STA or small APs (e.g. retail APs).
1366*5113495bSYour Name 	 * 3.  Both PN check and rx -> tx forwarding.
1367*5113495bSYour Name 	 *     This is the typical configuration for large LL APs.
1368*5113495bSYour Name 	 * Host-side PN check without rx->tx forwarding is not a valid
1369*5113495bSYour Name 	 * configuration, since the PN check needs to be done prior to
1370*5113495bSYour Name 	 * the rx->tx forwarding.
1371*5113495bSYour Name 	 */
1372*5113495bSYour Name 	if (ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev)) {
1373*5113495bSYour Name 		/*
1374*5113495bSYour Name 		 * PN check, rx-tx forwarding and rx reorder is done by
1375*5113495bSYour Name 		 * the target
1376*5113495bSYour Name 		 */
1377*5113495bSYour Name 		if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev))
1378*5113495bSYour Name 			pdev->rx_opt_proc = ol_rx_in_order_deliver;
1379*5113495bSYour Name 		else
1380*5113495bSYour Name 			pdev->rx_opt_proc = ol_rx_fwd_check;
1381*5113495bSYour Name 	} else {
1382*5113495bSYour Name 		if (ol_cfg_rx_pn_check(pdev->ctrl_pdev)) {
1383*5113495bSYour Name 			if (ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) {
1384*5113495bSYour Name 				/*
1385*5113495bSYour Name 				 * PN check done on host,
1386*5113495bSYour Name 				 * rx->tx forwarding not done at all.
1387*5113495bSYour Name 				 */
1388*5113495bSYour Name 				pdev->rx_opt_proc = ol_rx_pn_check_only;
1389*5113495bSYour Name 			} else if (ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1390*5113495bSYour Name 				/*
1391*5113495bSYour Name 				 * Both PN check and rx->tx forwarding done
1392*5113495bSYour Name 				 * on host.
1393*5113495bSYour Name 				 */
1394*5113495bSYour Name 				pdev->rx_opt_proc = ol_rx_pn_check;
1395*5113495bSYour Name 			} else {
1396*5113495bSYour Name #define TRACESTR01 "invalid config: if rx PN check is on the host, "\
1397*5113495bSYour Name "rx->tx forwarding check needs to also be on the host"
1398*5113495bSYour Name 				QDF_TRACE(QDF_MODULE_ID_TXRX,
1399*5113495bSYour Name 					  QDF_TRACE_LEVEL_ERROR,
1400*5113495bSYour Name 					  "%s: %s", __func__, TRACESTR01);
1401*5113495bSYour Name #undef TRACESTR01
1402*5113495bSYour Name 				ret = -EINVAL;
1403*5113495bSYour Name 				goto control_init_fail;
1404*5113495bSYour Name 			}
1405*5113495bSYour Name 		} else {
1406*5113495bSYour Name 			/* PN check done on target */
1407*5113495bSYour Name 			if ((!ol_cfg_rx_fwd_disabled(pdev->ctrl_pdev)) &&
1408*5113495bSYour Name 			    ol_cfg_rx_fwd_check(pdev->ctrl_pdev)) {
1409*5113495bSYour Name 				/*
1410*5113495bSYour Name 				 * rx->tx forwarding done on host (possibly as
1411*5113495bSYour Name 				 * back-up for target-side primary rx->tx
1412*5113495bSYour Name 				 * forwarding)
1413*5113495bSYour Name 				 */
1414*5113495bSYour Name 				pdev->rx_opt_proc = ol_rx_fwd_check;
1415*5113495bSYour Name 			} else {
1416*5113495bSYour Name 				/*
1417*5113495bSYour Name 				 * rx->tx forwarding either done in target,
1418*5113495bSYour Name 				 * or not done at all
1419*5113495bSYour Name 				 */
1420*5113495bSYour Name 				pdev->rx_opt_proc = ol_rx_deliver;
1421*5113495bSYour Name 			}
1422*5113495bSYour Name 		}
1423*5113495bSYour Name 	}
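
	/*
	 * Summary of the rx_opt_proc selection above (config -> handler):
	 *   full reorder offload + rx fwd disabled -> ol_rx_in_order_deliver
	 *   full reorder offload + rx fwd enabled  -> ol_rx_fwd_check
	 *   host PN check + rx fwd disabled        -> ol_rx_pn_check_only
	 *   host PN check + host rx fwd check      -> ol_rx_pn_check
	 *   host PN check, no host rx fwd check    -> invalid (-EINVAL)
	 *   target PN check + host rx fwd check    -> ol_rx_fwd_check
	 *   target PN check, no host rx fwd check  -> ol_rx_deliver
	 */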
1424*5113495bSYour Name 
1425*5113495bSYour Name 	/* initialize mutexes for tx desc alloc and peer lookup */
1426*5113495bSYour Name 	qdf_spinlock_create(&pdev->peer_ref_mutex);
1427*5113495bSYour Name 	qdf_spinlock_create(&pdev->rx.mutex);
1428*5113495bSYour Name 	qdf_spinlock_create(&pdev->last_real_peer_mutex);
1429*5113495bSYour Name 	qdf_spinlock_create(&pdev->peer_map_unmap_lock);
1430*5113495bSYour Name 	OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
1431*5113495bSYour Name 
1432*5113495bSYour Name 	if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK) {
1433*5113495bSYour Name 		ret = -ENOMEM;
1434*5113495bSYour Name 		goto reorder_trace_attach_fail;
1435*5113495bSYour Name 	}
1436*5113495bSYour Name 
1437*5113495bSYour Name 	if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK) {
1438*5113495bSYour Name 		ret = -ENOMEM;
1439*5113495bSYour Name 		goto pn_trace_attach_fail;
1440*5113495bSYour Name 	}
1441*5113495bSYour Name 
1442*5113495bSYour Name 	/*
1443*5113495bSYour Name 	 * WDI event attach
1444*5113495bSYour Name 	 */
1445*5113495bSYour Name 	wdi_event_attach(pdev);
1446*5113495bSYour Name 
1447*5113495bSYour Name 	/*
1448*5113495bSYour Name 	 * Initialize rx PN check characteristics for different security types.
1449*5113495bSYour Name 	 */
1450*5113495bSYour Name 	qdf_mem_zero(&pdev->rx_pn[0], sizeof(pdev->rx_pn));
1451*5113495bSYour Name 
1452*5113495bSYour Name 	/* TKIP: 48-bit TSC, CCMP: 48-bit PN */
1453*5113495bSYour Name 	pdev->rx_pn[htt_sec_type_tkip].len =
1454*5113495bSYour Name 		pdev->rx_pn[htt_sec_type_tkip_nomic].len =
1455*5113495bSYour Name 			pdev->rx_pn[htt_sec_type_aes_ccmp].len = 48;
1456*5113495bSYour Name 
1457*5113495bSYour Name 	pdev->rx_pn[htt_sec_type_aes_ccmp_256].len =
1458*5113495bSYour Name 		pdev->rx_pn[htt_sec_type_aes_gcmp].len =
1459*5113495bSYour Name 			pdev->rx_pn[htt_sec_type_aes_gcmp_256].len = 48;
1460*5113495bSYour Name 
1461*5113495bSYour Name 	pdev->rx_pn[htt_sec_type_tkip].cmp =
1462*5113495bSYour Name 		pdev->rx_pn[htt_sec_type_tkip_nomic].cmp =
1463*5113495bSYour Name 			pdev->rx_pn[htt_sec_type_aes_ccmp].cmp = ol_rx_pn_cmp48;
1464*5113495bSYour Name 
1465*5113495bSYour Name 	pdev->rx_pn[htt_sec_type_aes_ccmp_256].cmp =
1466*5113495bSYour Name 		pdev->rx_pn[htt_sec_type_aes_gcmp].cmp =
1467*5113495bSYour Name 		    pdev->rx_pn[htt_sec_type_aes_gcmp_256].cmp = ol_rx_pn_cmp48;
1468*5113495bSYour Name 
1469*5113495bSYour Name 	/* WAPI: 128-bit PN */
1470*5113495bSYour Name 	pdev->rx_pn[htt_sec_type_wapi].len = 128;
1471*5113495bSYour Name 	pdev->rx_pn[htt_sec_type_wapi].cmp = ol_rx_pn_wapi_cmp;
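
	/*
	 * A rough sketch of what these compare callbacks enforce (the real
	 * implementations live with ol_rx_pn_check in ol_rx_pn.c): replay
	 * protection requires each accepted frame's PN to be strictly
	 * greater than the last accepted PN for that peer/TID, compared
	 * over the configured width - 48 bits for TKIP/CCMP/GCMP, 128 bits
	 * for WAPI, which additionally has direction-specific PN rules.
	 */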
1472*5113495bSYour Name 
1473*5113495bSYour Name 	OL_RX_REORDER_TIMEOUT_INIT(pdev);
1474*5113495bSYour Name 
1475*5113495bSYour Name 	ol_txrx_dbg("Created pdev %pK", pdev);
1476*5113495bSYour Name 
1477*5113495bSYour Name 	pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
1478*5113495bSYour Name 
1479*5113495bSYour Name #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
1480*5113495bSYour Name #define OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT 3
1481*5113495bSYour Name 
1482*5113495bSYour Name /* #if 1 -- TODO: clean this up */
1483*5113495bSYour Name #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT	\
1484*5113495bSYour Name 	/* avg = 100% * new + 0% * old */ \
1485*5113495bSYour Name 	(1 << OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT)
1486*5113495bSYour Name /*
1487*5113495bSYour Name  * #else
1488*5113495bSYour Name  * #define OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT
1489*5113495bSYour Name  *	//avg = 25% * new + 75% * old
1490*5113495bSYour Name  *	(1 << (OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT-2))
1491*5113495bSYour Name  * #endif
1492*5113495bSYour Name  */
1493*5113495bSYour Name 	pdev->rssi_update_shift = OL_TXRX_RSSI_UPDATE_SHIFT_DEFAULT;
1494*5113495bSYour Name 	pdev->rssi_new_weight = OL_TXRX_RSSI_NEW_WEIGHT_DEFAULT;
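
	/*
	 * These parameters presumably feed an exponentially weighted
	 * moving average of the form
	 *   avg' = (new * weight + old * ((1 << shift) - weight)) >> shift
	 * With the defaults shift = 3, weight = 8:
	 *   avg' = (8 * new + 0 * old) >> 3 = new (100% new sample).
	 * The commented-out alternative weight of 2 would instead give
	 *   avg' = (2 * new + 6 * old) >> 3, i.e. 25% new + 75% old.
	 */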
1495*5113495bSYour Name #endif
1496*5113495bSYour Name 
1497*5113495bSYour Name 	ol_txrx_local_peer_id_pool_init(pdev);
1498*5113495bSYour Name 
1499*5113495bSYour Name 	pdev->cfg.ll_pause_txq_limit =
1500*5113495bSYour Name 		ol_tx_cfg_max_tx_queue_depth_ll(pdev->ctrl_pdev);
1501*5113495bSYour Name 
1502*5113495bSYour Name 	/* TX flow control for peer who is in very bad link status */
1503*5113495bSYour Name 	ol_tx_badpeer_flow_cl_init(pdev);
1504*5113495bSYour Name 
1505*5113495bSYour Name #ifdef QCA_COMPUTE_TX_DELAY
1506*5113495bSYour Name 	qdf_mem_zero(&pdev->tx_delay, sizeof(pdev->tx_delay));
1507*5113495bSYour Name 	qdf_spinlock_create(&pdev->tx_delay.mutex);
1508*5113495bSYour Name 
1509*5113495bSYour Name 	/* initialize compute interval with 5 seconds (ESE default) */
1510*5113495bSYour Name 	pdev->tx_delay.avg_period_ticks = qdf_system_msecs_to_ticks(5000);
1511*5113495bSYour Name 	{
1512*5113495bSYour Name 		uint32_t bin_width_1000ticks;
1513*5113495bSYour Name 
1514*5113495bSYour Name 		bin_width_1000ticks =
1515*5113495bSYour Name 			qdf_system_msecs_to_ticks
1516*5113495bSYour Name 				(QCA_TX_DELAY_HIST_INTERNAL_BIN_WIDTH_MS
1517*5113495bSYour Name 				 * 1000);
1518*5113495bSYour Name 		/*
1519*5113495bSYour Name 		 * Compute a factor and shift that together are equal to the
1520*5113495bSYour Name 		 * inverse of the bin_width time, so that rather than dividing
1521*5113495bSYour Name 		 * by the bin width time, approximately the same result can be
1522*5113495bSYour Name 		 * obtained much more efficiently by a multiply + shift.
1523*5113495bSYour Name 		 * multiply_factor >> shift = 1 / bin_width_time, so
1524*5113495bSYour Name 		 * multiply_factor = (1 << shift) / bin_width_time.
1525*5113495bSYour Name 		 *
1526*5113495bSYour Name 		 * Pick the shift semi-arbitrarily.
1527*5113495bSYour Name 		 * If we knew statically what the bin_width would be, we could
1528*5113495bSYour Name 		 * choose a shift that minimizes the error.
1529*5113495bSYour Name 		 * Since the bin_width is determined dynamically, simply use a
1530*5113495bSYour Name 		 * shift that is about half of the uint32_t size.  This should
1531*5113495bSYour Name 		 * result in a relatively large multiplier value, which
1532*5113495bSYour Name 		 * minimizes error from rounding the multiplier to an integer.
1533*5113495bSYour Name 		 * The rounding error only becomes significant if the tick units
1534*5113495bSYour Name 		 * are on the order of 1 microsecond.  In most systems, it is
1535*5113495bSYour Name 		 * expected that the tick units will be relatively low-res,
1536*5113495bSYour Name 		 * on the order of 1 millisecond.  In such systems the rounding
1537*5113495bSYour Name 		 * error is negligible.
1538*5113495bSYour Name 		 * It would be more accurate to dynamically try out different
1539*5113495bSYour Name 		 * shifts and choose the one that results in the smallest
1540*5113495bSYour Name 		 * rounding error, but that extra level of fidelity is
1541*5113495bSYour Name 		 * not needed.
1542*5113495bSYour Name 		 */
1543*5113495bSYour Name 		pdev->tx_delay.hist_internal_bin_width_shift = 16;
1544*5113495bSYour Name 		pdev->tx_delay.hist_internal_bin_width_mult =
1545*5113495bSYour Name 			((1 << pdev->tx_delay.hist_internal_bin_width_shift) *
1546*5113495bSYour Name 			 1000 + (bin_width_1000ticks >> 1)) /
1547*5113495bSYour Name 			bin_width_1000ticks;
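
		/*
		 * Worked example (assuming 1 ms ticks and a 10 ms internal
		 * bin width, so bin_width_1000ticks = 10000):
		 *   mult = ((1 << 16) * 1000 + 5000) / 10000 = 6554
		 * and (delay_ticks * 6554) >> 16 ~= delay_ticks / 10,
		 * matching division by the bin width to within ~0.01%
		 * while avoiding a divide on the data path.
		 */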
1548*5113495bSYour Name 	}
1549*5113495bSYour Name #endif /* QCA_COMPUTE_TX_DELAY */
1550*5113495bSYour Name 
1551*5113495bSYour Name 	/* Thermal Mitigation */
1552*5113495bSYour Name 	ol_tx_throttle_init(pdev);
1553*5113495bSYour Name 
1554*5113495bSYour Name 	ol_tso_seg_list_init(pdev, desc_pool_size);
1555*5113495bSYour Name 
1556*5113495bSYour Name 	ol_tso_num_seg_list_init(pdev, desc_pool_size);
1557*5113495bSYour Name 
1558*5113495bSYour Name 	ol_tx_register_flow_control(pdev);
1559*5113495bSYour Name 
1560*5113495bSYour Name 	return 0;            /* success */
1561*5113495bSYour Name 
1562*5113495bSYour Name pn_trace_attach_fail:
1563*5113495bSYour Name 	OL_RX_REORDER_TRACE_DETACH(pdev);
1564*5113495bSYour Name 
1565*5113495bSYour Name reorder_trace_attach_fail:
1566*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1567*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->rx.mutex);
1568*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1569*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
1570*5113495bSYour Name 	OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1571*5113495bSYour Name 
1572*5113495bSYour Name control_init_fail:
1573*5113495bSYour Name desc_alloc_fail:
1574*5113495bSYour Name 	for (i = 0; i < fail_idx; i++)
1575*5113495bSYour Name 		htt_tx_desc_free(pdev->htt_pdev,
1576*5113495bSYour Name 			(ol_tx_desc_find(pdev, i))->htt_tx_desc);
1577*5113495bSYour Name 
1578*5113495bSYour Name 	qdf_mem_multi_pages_free(pdev->osdev,
1579*5113495bSYour Name 		&pdev->tx_desc.desc_pages, 0, true);
1580*5113495bSYour Name 
1581*5113495bSYour Name page_alloc_fail:
1582*5113495bSYour Name 	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1583*5113495bSYour Name 		htt_ipa_uc_detach(pdev->htt_pdev);
1584*5113495bSYour Name uc_attach_fail:
1585*5113495bSYour Name 	htt_detach(pdev->htt_pdev);
1586*5113495bSYour Name htt_attach_fail:
1587*5113495bSYour Name 	ol_tx_desc_dup_detect_deinit(pdev);
1588*5113495bSYour Name ol_attach_fail:
1589*5113495bSYour Name 	return ret;            /* fail */
1590*5113495bSYour Name }
1591*5113495bSYour Name 
1592*5113495bSYour Name /**
1593*5113495bSYour Name  * ol_txrx_pdev_attach_target() - send target configuration
1594*5113495bSYour Name  *
1595*5113495bSYour Name  * @soc_hdl - data path soc handle
1596*5113495bSYour Name  * @pdev_id - device instance id
1597*5113495bSYour Name  *
1598*5113495bSYour Name  * The majority of the data SW setup is done by the pdev_attach
1599*5113495bSYour Name  * functions, but this function completes the data SW setup by
1600*5113495bSYour Name  * sending datapath configuration messages to the target.
1601*5113495bSYour Name  *
1602*5113495bSYour Name  * Return: 0 on success, non-zero on failure
1603*5113495bSYour Name  */
1604*5113495bSYour Name static int ol_txrx_pdev_attach_target(struct cdp_soc_t *soc_hdl,
1605*5113495bSYour Name 				      uint8_t pdev_id)
1606*5113495bSYour Name {
1607*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1608*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
1609*5113495bSYour Name 
1610*5113495bSYour Name 	if (!pdev)
1611*5113495bSYour Name 		return QDF_STATUS_E_FAULT;
1612*5113495bSYour Name 
1613*5113495bSYour Name 	return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0 : 1;
1614*5113495bSYour Name }
1615*5113495bSYour Name 
1616*5113495bSYour Name /**
1617*5113495bSYour Name  * ol_tx_free_descs_inuse - free tx descriptors which are in use
1618*5113495bSYour Name  * @pdev - the physical device for which tx descs need to be freed
1619*5113495bSYour Name  *
1620*5113495bSYour Name  * Cycle through the pdev's list of TX descriptors which are still in use,
1621*5113495bSYour Name  * i.e. for which no TX completion has been received, and free them. Should be
1622*5113495bSYour Name  * called only when the interrupts are off and all lower layer RX is stopped.
1623*5113495bSYour Name  * Otherwise there may be a race condition with TX completions.
1624*5113495bSYour Name  *
1625*5113495bSYour Name  * Return: None
1626*5113495bSYour Name  */
1627*5113495bSYour Name static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
1628*5113495bSYour Name {
1629*5113495bSYour Name 	int i;
1630*5113495bSYour Name 	void *htt_tx_desc;
1631*5113495bSYour Name 	struct ol_tx_desc_t *tx_desc;
1632*5113495bSYour Name 	int num_freed_tx_desc = 0;
1633*5113495bSYour Name 
1634*5113495bSYour Name 	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
1635*5113495bSYour Name 		tx_desc = ol_tx_desc_find(pdev, i);
1636*5113495bSYour Name 		/*
1637*5113495bSYour Name 		 * Confirm that each tx descriptor is "empty", i.e. it has
1638*5113495bSYour Name 		 * no tx frame attached.
1639*5113495bSYour Name 		 * In particular, check that there are no frames that have
1640*5113495bSYour Name 		 * been given to the target to transmit, for which the
1641*5113495bSYour Name 		 * target has never provided a response.
1642*5113495bSYour Name 		 *
1643*5113495bSYour Name 		 * Rome supports mgmt Tx via the HTT interface, not via WMI.
1644*5113495bSYour Name 		 * When a mgmt frame is sent, two tx descs are allocated:
1645*5113495bSYour Name 		 * a mgmt_txrx_desc in wlan_mgmt_txrx_mgmt_frame_tx, and
1646*5113495bSYour Name 		 * an ol_tx_desc in ol_txrx_mgmt_send_ext.
1647*5113495bSYour Name 		 * Both point to the same net buffer, which is mapped
1648*5113495bSYour Name 		 * in htt_tx_desc_init.
1649*5113495bSYour Name 		 *
1650*5113495bSYour Name 		 * If SSR occurs while a Rome STA is connected, a deauth frame
1651*5113495bSYour Name 		 * is sent but no tx completion arrives (the firmware has hung).
1652*5113495bSYour Name 		 * Pending mgmt frames are unmapped and freed when the vdev
1653*5113495bSYour Name 		 * is destroyed:
1654*5113495bSYour Name 		 * hdd_reset_all_adapters->hdd_stop_adapter->hdd_vdev_destroy
1655*5113495bSYour Name 		 * ->wma_handle_vdev_detach->wlan_mgmt_txrx_vdev_drain
1656*5113495bSYour Name 		 * ->wma_mgmt_frame_fill_peer_cb
1657*5113495bSYour Name 		 * ->mgmt_txrx_tx_completion_handler.
1658*5113495bSYour Name 		 *
1659*5113495bSYour Name 		 * So there is no need to unmap and free the mgmt frames' net
1660*5113495bSYour Name 		 * buffers again during data path cleanup; just free the ol_tx_desc:
1661*5113495bSYour Name 		 * hdd_wlan_stop_modules->cds_post_disable->cdp_pdev_pre_detach
1662*5113495bSYour Name 		 * ->ol_txrx_pdev_pre_detach->ol_tx_free_descs_inuse.
1663*5113495bSYour Name 		 */
1664*5113495bSYour Name 		if (qdf_atomic_read(&tx_desc->ref_cnt)) {
1665*5113495bSYour Name 			if (!ol_tx_get_is_mgmt_over_wmi_enabled() &&
1666*5113495bSYour Name 			    tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) {
1667*5113495bSYour Name 				qdf_atomic_init(&tx_desc->ref_cnt);
1668*5113495bSYour Name 				ol_txrx_dbg("Pending mgmt frames nbuf unmapped and freed already when vdev destroyed");
1669*5113495bSYour Name 				/* free the tx desc */
1670*5113495bSYour Name 				ol_tx_desc_free(pdev, tx_desc);
1671*5113495bSYour Name 			} else {
1672*5113495bSYour Name 				ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
1673*5113495bSYour Name 				ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
1674*5113495bSYour Name 			}
1675*5113495bSYour Name 			num_freed_tx_desc++;
1676*5113495bSYour Name 		}
1677*5113495bSYour Name 		htt_tx_desc = tx_desc->htt_tx_desc;
1678*5113495bSYour Name 		htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
1679*5113495bSYour Name 	}
1680*5113495bSYour Name 
1681*5113495bSYour Name 	if (num_freed_tx_desc)
1682*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
1683*5113495bSYour Name 		"freed %d tx frames for which no resp from target",
1684*5113495bSYour Name 		num_freed_tx_desc);
1685*5113495bSYour Name 
1686*5113495bSYour Name }
1687*5113495bSYour Name 
1688*5113495bSYour Name /**
1689*5113495bSYour Name  * ol_txrx_pdev_pre_detach() - detach the data SW state
1690*5113495bSYour Name  * @soc_hdl - datapath soc handle
1691*5113495bSYour Name  * @pdev_id - the data physical device id being removed
1692*5113495bSYour Name  * @force - delete the pdev (and its vdevs and peers) even if
1693*5113495bSYour Name  * there are outstanding references by the target to the vdevs
1694*5113495bSYour Name  * and peers within the pdev
1695*5113495bSYour Name  *
1696*5113495bSYour Name  * This function is used when the WLAN driver is being removed to
1697*5113495bSYour Name  * detach the host data component within the driver.
1698*5113495bSYour Name  *
1699*5113495bSYour Name  * Return: none
1700*5113495bSYour Name  */
1701*5113495bSYour Name static void ol_txrx_pdev_pre_detach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1702*5113495bSYour Name 					  int force)
1703*5113495bSYour Name {
1704*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1705*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
1706*5113495bSYour Name 								    pdev_id);
1707*5113495bSYour Name 
1708*5113495bSYour Name 	/* preconditions */
1709*5113495bSYour Name 	TXRX_ASSERT2(pdev);
1710*5113495bSYour Name 
1711*5113495bSYour Name 	/* check that the pdev has no vdevs allocated */
1712*5113495bSYour Name 	TXRX_ASSERT1(TAILQ_EMPTY(&pdev->vdev_list));
1713*5113495bSYour Name 
1714*5113495bSYour Name #ifdef QCA_SUPPORT_TX_THROTTLE
1715*5113495bSYour Name 	/* Thermal Mitigation */
1716*5113495bSYour Name 	qdf_timer_stop(&pdev->tx_throttle.phase_timer);
1717*5113495bSYour Name 	qdf_timer_free(&pdev->tx_throttle.phase_timer);
1718*5113495bSYour Name #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
1719*5113495bSYour Name 	qdf_timer_stop(&pdev->tx_throttle.tx_timer);
1720*5113495bSYour Name 	qdf_timer_free(&pdev->tx_throttle.tx_timer);
1721*5113495bSYour Name #endif
1722*5113495bSYour Name #endif
1723*5113495bSYour Name 
1724*5113495bSYour Name 	if (force) {
1725*5113495bSYour Name 		/*
1726*5113495bSYour Name 		 * The assertion above confirms that all vdevs within this pdev
1727*5113495bSYour Name 		 * were detached.  However, they may not have actually been
1728*5113495bSYour Name 		 * deleted.
1729*5113495bSYour Name 		 * If the vdev had peers which never received a PEER_UNMAP msg
1730*5113495bSYour Name 		 * from the target, then there are still zombie peer objects,
1731*5113495bSYour Name 		 * and the vdev parents of the zombie peers are also zombies,
1732*5113495bSYour Name 		 * hanging around until their final peer gets deleted.
1733*5113495bSYour Name 		 * Go through the peer hash table and delete any peers left.
1734*5113495bSYour Name 		 * As a side effect, this will complete the deletion of any
1735*5113495bSYour Name 		 * vdevs that are waiting for their peers to finish deletion.
1736*5113495bSYour Name 		 */
1737*5113495bSYour Name 		ol_txrx_dbg("Force delete for pdev %pK",
1738*5113495bSYour Name 			   pdev);
1739*5113495bSYour Name 		ol_txrx_peer_find_hash_erase(pdev);
1740*5113495bSYour Name 		ol_txrx_peer_free_inactive_list(pdev);
1741*5113495bSYour Name 	}
1742*5113495bSYour Name 
1743*5113495bSYour Name 	/* to get flow pool status before freeing descs */
1744*5113495bSYour Name 	ol_tx_dump_flow_pool_info(cds_get_context(QDF_MODULE_ID_SOC));
1745*5113495bSYour Name 	ol_tx_free_descs_inuse(pdev);
1746*5113495bSYour Name 	ol_tx_deregister_flow_control(pdev);
1747*5113495bSYour Name 
1748*5113495bSYour Name 	/*
1749*5113495bSYour Name 	 * ol_tso_seg_list_deinit should happen after ol_tx_free_descs_inuse,
1750*5113495bSYour Name 	 * since the latter accesses the tso seg freelist which is
1751*5113495bSYour Name 	 * de-initialized in ol_tso_seg_list_deinit
1752*5113495bSYour Name 	 */
1753*5113495bSYour Name 	ol_tso_seg_list_deinit(pdev);
1754*5113495bSYour Name 	ol_tso_num_seg_list_deinit(pdev);
1755*5113495bSYour Name 
1756*5113495bSYour Name 	/* Stop the communication between HTT and target at first */
1757*5113495bSYour Name 	htt_detach_target(pdev->htt_pdev);
1758*5113495bSYour Name 
1759*5113495bSYour Name 	qdf_mem_multi_pages_free(pdev->osdev,
1760*5113495bSYour Name 		&pdev->tx_desc.desc_pages, 0, true);
1761*5113495bSYour Name 	pdev->tx_desc.freelist = NULL;
1762*5113495bSYour Name 
1763*5113495bSYour Name 	/* Detach micro controller data path offload resource */
1764*5113495bSYour Name 	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
1765*5113495bSYour Name 		htt_ipa_uc_detach(pdev->htt_pdev);
1766*5113495bSYour Name 
1767*5113495bSYour Name 	htt_detach(pdev->htt_pdev);
1768*5113495bSYour Name 	ol_tx_desc_dup_detect_deinit(pdev);
1769*5113495bSYour Name 
1770*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->peer_ref_mutex);
1771*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->last_real_peer_mutex);
1772*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->rx.mutex);
1773*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->peer_map_unmap_lock);
1774*5113495bSYour Name #ifdef QCA_SUPPORT_TX_THROTTLE
1775*5113495bSYour Name 	/* Thermal Mitigation */
1776*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->tx_throttle.mutex);
1777*5113495bSYour Name #endif
1778*5113495bSYour Name 
1779*5113495bSYour Name 	/* TX flow control for peer who is in very bad link status */
1780*5113495bSYour Name 	ol_tx_badpeer_flow_cl_deinit(pdev);
1781*5113495bSYour Name 
1782*5113495bSYour Name 	OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
1783*5113495bSYour Name 
1784*5113495bSYour Name 	OL_RX_REORDER_TRACE_DETACH(pdev);
1785*5113495bSYour Name 	OL_RX_PN_TRACE_DETACH(pdev);
1786*5113495bSYour Name 
1787*5113495bSYour Name 	htt_pktlogmod_exit(pdev);
1788*5113495bSYour Name 
1789*5113495bSYour Name 	/*
1790*5113495bSYour Name 	 * WDI event detach
1791*5113495bSYour Name 	 */
1792*5113495bSYour Name 	wdi_event_detach(pdev);
1793*5113495bSYour Name 
1794*5113495bSYour Name 	ol_txrx_local_peer_id_cleanup(pdev);
1795*5113495bSYour Name 
1796*5113495bSYour Name #ifdef QCA_COMPUTE_TX_DELAY
1797*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->tx_delay.mutex);
1798*5113495bSYour Name #endif
1801*5113495bSYour Name }
1802*5113495bSYour Name 
1803*5113495bSYour Name /**
1804*5113495bSYour Name  * ol_txrx_pdev_detach() - delete the data SW state
1805*5113495bSYour Name  * @soc_hdl - data path soc handle
1806*5113495bSYour Name  * @pdev_id - device instance id
1807*5113495bSYour Name  * @force - delete the pdev (and its vdevs and peers) even if
1808*5113495bSYour Name  * there are outstanding references by the target to the vdevs
1809*5113495bSYour Name  * and peers within the pdev
1810*5113495bSYour Name  *
1811*5113495bSYour Name  * This function is used when the WLAN driver is being removed to
1812*5113495bSYour Name  * remove the host data component within the driver.
1813*5113495bSYour Name  * All virtual devices within the physical device need to be deleted
1814*5113495bSYour Name  * (ol_txrx_vdev_detach) before the physical device itself is deleted.
1815*5113495bSYour Name  *
1816*5113495bSYour Name  * Return: Success or Failure
1817*5113495bSYour Name  */
1818*5113495bSYour Name static QDF_STATUS ol_txrx_pdev_detach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1819*5113495bSYour Name 				      int force)
1820*5113495bSYour Name {
1821*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1822*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
1823*5113495bSYour Name 								    pdev_id);
1824*5113495bSYour Name 	struct ol_txrx_stats_req_internal *req, *temp_req;
1825*5113495bSYour Name 	int i = 0;
1826*5113495bSYour Name 
1827*5113495bSYour Name 	if (!soc) {
1828*5113495bSYour Name 		ol_txrx_err("soc is NULL");
1829*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1830*5113495bSYour Name 	}
1831*5113495bSYour Name 
1832*5113495bSYour Name 	/* check to ensure the txrx pdev structure is not NULL */
1833*5113495bSYour Name 	if (!pdev) {
1834*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
1835*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1836*5113495bSYour Name 	}
1837*5113495bSYour Name 
1838*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->req_list_spinlock);
1839*5113495bSYour Name 	if (pdev->req_list_depth > 0)
1840*5113495bSYour Name 		ol_txrx_err(
1841*5113495bSYour Name 			"Warning: the txrx req list is not empty, depth=%d",
1842*5113495bSYour Name 			pdev->req_list_depth
1843*5113495bSYour Name 			);
1844*5113495bSYour Name 	TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
1845*5113495bSYour Name 		TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
1846*5113495bSYour Name 		pdev->req_list_depth--;
1847*5113495bSYour Name 		ol_txrx_err(
1848*5113495bSYour Name 			"%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)",
1849*5113495bSYour Name 			i++,
1850*5113495bSYour Name 			req,
1851*5113495bSYour Name 			req->base.print.verbose,
1852*5113495bSYour Name 			req->base.print.concise,
1853*5113495bSYour Name 			req->base.stats_type_upload_mask,
1854*5113495bSYour Name 			req->base.stats_type_reset_mask
1855*5113495bSYour Name 			);
1856*5113495bSYour Name 		qdf_mem_free(req);
1857*5113495bSYour Name 	}
1858*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->req_list_spinlock);
1859*5113495bSYour Name 
1860*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->req_list_spinlock);
1861*5113495bSYour Name 	qdf_spinlock_destroy(&pdev->tx_mutex);
1862*5113495bSYour Name 
1863*5113495bSYour Name 	OL_RX_REORDER_TIMEOUT_CLEANUP(pdev);
1864*5113495bSYour Name 
1865*5113495bSYour Name 	if (pdev->cfg.is_high_latency)
1866*5113495bSYour Name 		ol_tx_sched_detach(pdev);
1867*5113495bSYour Name 
1868*5113495bSYour Name 	htt_deregister_rx_pkt_dump_callback(pdev->htt_pdev);
1869*5113495bSYour Name 
1870*5113495bSYour Name 	htt_pdev_free(pdev->htt_pdev);
1871*5113495bSYour Name 	ol_txrx_peer_find_detach(pdev);
1872*5113495bSYour Name 	ol_txrx_tso_stats_deinit(pdev);
1873*5113495bSYour Name 	ol_txrx_fw_stats_desc_pool_deinit(pdev);
1874*5113495bSYour Name 
1875*5113495bSYour Name 	ol_txrx_pdev_txq_log_destroy(pdev);
1876*5113495bSYour Name 	ol_txrx_pdev_grp_stat_destroy(pdev);
1877*5113495bSYour Name 
1878*5113495bSYour Name 	ol_txrx_debugfs_exit(pdev);
1879*5113495bSYour Name 	ol_unregister_peer_recovery_notifier();
1880*5113495bSYour Name 
1881*5113495bSYour Name 	soc->pdev_list[pdev->id] = NULL;
1882*5113495bSYour Name 	qdf_mem_free(pdev);
1883*5113495bSYour Name 
1884*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1885*5113495bSYour Name }
1886*5113495bSYour Name 
1887*5113495bSYour Name #if defined(QCA_HL_NETDEV_FLOW_CONTROL)
1888*5113495bSYour Name 
1889*5113495bSYour Name /**
1890*5113495bSYour Name  * ol_txrx_vdev_per_vdev_tx_desc_init() - initialise per vdev tx desc count
1891*5113495bSYour Name  * related variables.
1892*5113495bSYour Name  * @vdev: the virtual device object
1893*5113495bSYour Name  *
1894*5113495bSYour Name  * Return: None
1895*5113495bSYour Name  */
1896*5113495bSYour Name static inline void
1897*5113495bSYour Name ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
1898*5113495bSYour Name {
1899*5113495bSYour Name 	qdf_atomic_init(&vdev->tx_desc_count);
1900*5113495bSYour Name 	vdev->tx_desc_limit = 0;
1901*5113495bSYour Name 	vdev->queue_restart_th = 0;
1902*5113495bSYour Name 	vdev->prio_q_paused = 0;
1903*5113495bSYour Name 	vdev->queue_stop_th = 0;
1904*5113495bSYour Name }
1905*5113495bSYour Name #else
1906*5113495bSYour Name 
1907*5113495bSYour Name static inline void
1908*5113495bSYour Name ol_txrx_vdev_per_vdev_tx_desc_init(struct ol_txrx_vdev_t *vdev)
1909*5113495bSYour Name {
1910*5113495bSYour Name }
1911*5113495bSYour Name #endif /* QCA_HL_NETDEV_FLOW_CONTROL */
1912*5113495bSYour Name 
1913*5113495bSYour Name /**
1914*5113495bSYour Name  * ol_txrx_vdev_attach - Allocate and initialize the data object
1915*5113495bSYour Name  * for a new virtual device.
1916*5113495bSYour Name  *
1917*5113495bSYour Name  * @soc_hdl - data path soc handle
1918*5113495bSYour Name  * @pdev_id - physical device instance id
1919*5113495bSYour Name  * @vdev_info - vdev information, including the MAC address of the
1920*5113495bSYour Name  * virtual device, the ID used to identify it to the target, its
1921*5113495bSYour Name  * operating mode (whether it is an AP, an IBSS, or a STA), and the
1922*5113495bSYour Name  * subtype of the operating vdev
1923*5113495bSYour Name  *
1924*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS on success,
1925*5113495bSYour Name  *	   QDF error code on failure
1927*5113495bSYour Name  */
1928*5113495bSYour Name static QDF_STATUS
1929*5113495bSYour Name ol_txrx_vdev_attach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
1930*5113495bSYour Name 		    struct cdp_vdev_info *vdev_info)
1931*5113495bSYour Name {
1932*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
1933*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
1934*5113495bSYour Name 								    pdev_id);
1935*5113495bSYour Name 	uint8_t *vdev_mac_addr = vdev_info->vdev_mac_addr;
1936*5113495bSYour Name 	uint8_t vdev_id = vdev_info->vdev_id;
1937*5113495bSYour Name 	enum wlan_op_mode op_mode = vdev_info->op_mode;
1938*5113495bSYour Name 	enum wlan_op_subtype subtype = vdev_info->subtype;
1939*5113495bSYour Name 	enum QDF_OPMODE qdf_opmode = vdev_info->qdf_opmode;
1940*5113495bSYour Name 
1941*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
1942*5113495bSYour Name 	QDF_STATUS qdf_status;
1943*5113495bSYour Name 
1944*5113495bSYour Name 	/* preconditions */
1945*5113495bSYour Name 	TXRX_ASSERT2(pdev);
1946*5113495bSYour Name 	TXRX_ASSERT2(vdev_mac_addr);
1947*5113495bSYour Name 
1948*5113495bSYour Name 	if (qdf_unlikely(!soc)) {
1949*5113495bSYour Name 		ol_txrx_err("soc is NULL");
1950*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
1951*5113495bSYour Name 	}
1952*5113495bSYour Name 
1953*5113495bSYour Name 	vdev = qdf_mem_malloc(sizeof(*vdev));
1954*5113495bSYour Name 	if (!vdev)
1955*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;    /* failure */
1956*5113495bSYour Name 
1957*5113495bSYour Name 	/* store provided params */
1958*5113495bSYour Name 	vdev->pdev = pdev;
1959*5113495bSYour Name 	vdev->vdev_id = vdev_id;
1960*5113495bSYour Name 	vdev->opmode = op_mode;
1961*5113495bSYour Name 	vdev->subtype = subtype;
1962*5113495bSYour Name 	vdev->qdf_opmode = qdf_opmode;
1963*5113495bSYour Name 
1964*5113495bSYour Name 	vdev->delete.pending = 0;
1965*5113495bSYour Name 	vdev->safemode = 0;
1966*5113495bSYour Name 	vdev->drop_unenc = 1;
1967*5113495bSYour Name 	vdev->num_filters = 0;
1968*5113495bSYour Name 	vdev->fwd_tx_packets = 0;
1969*5113495bSYour Name 	vdev->fwd_rx_packets = 0;
1970*5113495bSYour Name 
1971*5113495bSYour Name 	ol_txrx_vdev_per_vdev_tx_desc_init(vdev);
1972*5113495bSYour Name 
1973*5113495bSYour Name 	qdf_mem_copy(&vdev->mac_addr.raw[0], vdev_mac_addr,
1974*5113495bSYour Name 		     QDF_MAC_ADDR_SIZE);
1975*5113495bSYour Name 
1976*5113495bSYour Name 	TAILQ_INIT(&vdev->peer_list);
1977*5113495bSYour Name 	vdev->last_real_peer = NULL;
1978*5113495bSYour Name 
1979*5113495bSYour Name #ifdef QCA_IBSS_SUPPORT
1980*5113495bSYour Name 	vdev->ibss_peer_num = 0;
1981*5113495bSYour Name 	vdev->ibss_peer_heart_beat_timer = 0;
1982*5113495bSYour Name #endif
1983*5113495bSYour Name 
1984*5113495bSYour Name 	ol_txrx_vdev_txqs_init(vdev);
1985*5113495bSYour Name 
1986*5113495bSYour Name 	qdf_spinlock_create(&vdev->ll_pause.mutex);
1987*5113495bSYour Name 	vdev->ll_pause.paused_reason = 0;
1988*5113495bSYour Name 	vdev->ll_pause.txq.head = vdev->ll_pause.txq.tail = NULL;
1989*5113495bSYour Name 	vdev->ll_pause.txq.depth = 0;
1990*5113495bSYour Name 	qdf_atomic_init(&vdev->delete.detaching);
1991*5113495bSYour Name 	qdf_timer_init(pdev->osdev,
1992*5113495bSYour Name 			       &vdev->ll_pause.timer,
1993*5113495bSYour Name 			       ol_tx_vdev_ll_pause_queue_send, vdev,
1994*5113495bSYour Name 			       QDF_TIMER_TYPE_SW);
1995*5113495bSYour Name 	qdf_atomic_init(&vdev->os_q_paused);
1996*5113495bSYour Name 	qdf_atomic_set(&vdev->os_q_paused, 0);
1997*5113495bSYour Name 	vdev->tx_fl_lwm = 0;
1998*5113495bSYour Name 	vdev->tx_fl_hwm = 0;
1999*5113495bSYour Name 	vdev->rx = NULL;
2000*5113495bSYour Name 	vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2001*5113495bSYour Name 	qdf_mem_zero(&vdev->last_peer_mac_addr,
2002*5113495bSYour Name 			sizeof(union ol_txrx_align_mac_addr_t));
2003*5113495bSYour Name 	qdf_spinlock_create(&vdev->flow_control_lock);
2004*5113495bSYour Name 	vdev->osif_flow_control_cb = NULL;
2005*5113495bSYour Name 	vdev->osif_flow_control_is_pause = NULL;
2006*5113495bSYour Name 	vdev->osif_fc_ctx = NULL;
2007*5113495bSYour Name 
2008*5113495bSYour Name 	vdev->txrx_stats.txack_success = 0;
2009*5113495bSYour Name 	vdev->txrx_stats.txack_failed = 0;
2010*5113495bSYour Name 
2011*5113495bSYour Name 	vdev->bundling_required = false;
2012*5113495bSYour Name 	qdf_spinlock_create(&vdev->bundle_queue.mutex);
2013*5113495bSYour Name 	vdev->bundle_queue.txq.head = NULL;
2014*5113495bSYour Name 	vdev->bundle_queue.txq.tail = NULL;
2015*5113495bSYour Name 	vdev->bundle_queue.txq.depth = 0;
2016*5113495bSYour Name 	qdf_timer_init(
2017*5113495bSYour Name 		pdev->osdev,
2018*5113495bSYour Name 		&vdev->bundle_queue.timer,
2019*5113495bSYour Name 		ol_tx_hl_vdev_bundle_timer,
2020*5113495bSYour Name 		vdev, QDF_TIMER_TYPE_SW);
2021*5113495bSYour Name 
2022*5113495bSYour Name 	/* Default MAX Q depth for every VDEV */
2023*5113495bSYour Name 	vdev->ll_pause.max_q_depth =
2024*5113495bSYour Name 		ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
2025*5113495bSYour Name 	qdf_status = qdf_event_create(&vdev->wait_delete_comp);
2026*5113495bSYour Name 
2027*5113495bSYour Name 	ol_txrx_vdev_init_tcp_del_ack(vdev);
2028*5113495bSYour Name 
2029*5113495bSYour Name 	/* add this vdev into the pdev's list */
2030*5113495bSYour Name 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
2031*5113495bSYour Name 	if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam())
2032*5113495bSYour Name 		pdev->monitor_vdev = vdev;
2033*5113495bSYour Name 
2034*5113495bSYour Name 	ol_txrx_hl_tdls_flag_reset(soc_hdl, vdev_id, false);
2035*5113495bSYour Name 
2036*5113495bSYour Name 	ol_txrx_dbg(
2037*5113495bSYour Name 		   "Created vdev %pK ("QDF_MAC_ADDR_FMT")",
2038*5113495bSYour Name 		   vdev,
2039*5113495bSYour Name 		   QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
2040*5113495bSYour Name 
2041*5113495bSYour Name 	/*
2042*5113495bSYour Name 	 * We've verified that htt_op_mode == wlan_op_mode,
2043*5113495bSYour Name 	 * so no translation is needed.
2044*5113495bSYour Name 	 */
2045*5113495bSYour Name 	htt_vdev_attach(pdev->htt_pdev, vdev_id, op_mode);
2046*5113495bSYour Name 
2047*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2048*5113495bSYour Name }
2049*5113495bSYour Name 
2050*5113495bSYour Name /**
2051*5113495bSYour Name  * ol_txrx_vdev_register - Link a vdev's data object with the
2052*5113495bSYour Name  * matching OS shim vdev object.
2053*5113495bSYour Name  *
2054*5113495bSYour Name  * @soc_hdl: datapath soc handle
2055*5113495bSYour Name  * @vdev_id: the virtual device's id
2056*5113495bSYour Name  * @osif_vdev: the virtual device's OS shim object
2057*5113495bSYour Name  * @txrx_ops: (pointers to)functions used for tx and rx data xfer
2058*5113495bSYour Name  *
2059*5113495bSYour Name  *  The data object for a virtual device is created by the
2060*5113495bSYour Name  *  function ol_txrx_vdev_attach.  However, rather than fully
2061*5113495bSYour Name  *  linking the data vdev object with the vdev objects from the
2062*5113495bSYour Name  *  other subsystems that the data vdev object interacts with,
2063*5113495bSYour Name  *  the txrx_vdev_attach function focuses primarily on creating
2064*5113495bSYour Name  *  the data vdev object. After the creation of both the data
2065*5113495bSYour Name  *  vdev object and the OS shim vdev object, this
2066*5113495bSYour Name  *  ol_txrx_vdev_register function is used to connect the two
2067*5113495bSYour Name  *  vdev objects, so the data SW can use the OS shim vdev handle
2068*5113495bSYour Name  *  when passing rx data received by a vdev up to the OS shim.
2069*5113495bSYour Name  */
2070*5113495bSYour Name static QDF_STATUS ol_txrx_vdev_register(struct cdp_soc_t *soc_hdl,
2071*5113495bSYour Name 					uint8_t vdev_id,
2072*5113495bSYour Name 					ol_osif_vdev_handle osif_vdev,
2073*5113495bSYour Name 					struct ol_txrx_ops *txrx_ops)
2074*5113495bSYour Name {
2075*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2076*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2077*5113495bSYour Name 								     vdev_id);
2078*5113495bSYour Name 
2079*5113495bSYour Name 	if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
2080*5113495bSYour Name 		qdf_print("vdev/txrx_ops is NULL!");
2081*5113495bSYour Name 		qdf_assert(0);
2082*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
2083*5113495bSYour Name 	}
2084*5113495bSYour Name 
2085*5113495bSYour Name 	vdev->osif_dev = osif_vdev;
2086*5113495bSYour Name 	vdev->rx = txrx_ops->rx.rx;
2087*5113495bSYour Name 	vdev->stats_rx = txrx_ops->rx.stats_rx;
2088*5113495bSYour Name 	vdev->tx_comp = txrx_ops->tx.tx_comp;
2089*5113495bSYour Name 	vdev->vdev_del_notify = txrx_ops->vdev_del_notify;
2090*5113495bSYour Name 	txrx_ops->tx.tx = ol_tx_data;
2091*5113495bSYour Name 
2092*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2093*5113495bSYour Name }
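
/*
 * Example (illustrative; osif_rx_cb, osif_tx_comp_cb and shim are
 * hypothetical names): how an OS shim might populate ol_txrx_ops
 * before registering. Only fields read or written above are used.
 *
 *	struct ol_txrx_ops txrx_ops = { 0 };
 *
 *	txrx_ops.rx.rx = osif_rx_cb;            // rx delivery callback
 *	txrx_ops.tx.tx_comp = osif_tx_comp_cb;  // optional tx completion
 *	if (ol_txrx_vdev_register(soc_hdl, vdev_id, osif_vdev,
 *				  &txrx_ops) == QDF_STATUS_SUCCESS)
 *		shim->tx_fn = txrx_ops.tx.tx;   // filled in: ol_tx_data
 */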
2094*5113495bSYour Name 
2095*5113495bSYour Name /**
2096*5113495bSYour Name  * ol_txrx_set_privacy_filters - set the privacy filter
2097*5113495bSYour Name  * @vdev - the data virtual device object
2098*5113495bSYour Name  * @filter - filters to be set
2099*5113495bSYour Name  * @num - the number of filters
2100*5113495bSYour Name  *
2101*5113495bSYour Name  * Rx related. Set the privacy filters. When rx packets arrive,
2102*5113495bSYour Name  * check the ether type, filter type, and packet type to decide
2103*5113495bSYour Name  * whether to discard these packets.
2104*5113495bSYour Name  */
2105*5113495bSYour Name static void
2106*5113495bSYour Name ol_txrx_set_privacy_filters(ol_txrx_vdev_handle vdev,
2107*5113495bSYour Name 			    void *filters, uint32_t num)
2108*5113495bSYour Name {
2109*5113495bSYour Name 	qdf_mem_copy(vdev->privacy_filters, filters,
2110*5113495bSYour Name 		     num * sizeof(struct privacy_exemption));
2111*5113495bSYour Name 	vdev->num_filters = num;
2112*5113495bSYour Name }
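
/*
 * Usage sketch (hypothetical; MAX_PRIVACY_FILTERS and
 * fill_privacy_filters are illustrative names): the control path hands
 * down an array of struct privacy_exemption entries; only the array
 * size is used by the copy above.
 *
 *	struct privacy_exemption filters[MAX_PRIVACY_FILTERS];
 *	uint32_t n = fill_privacy_filters(filters);
 *
 *	ol_txrx_set_privacy_filters(vdev, filters, n);
 */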
2113*5113495bSYour Name 
2114*5113495bSYour Name #if defined(CONFIG_HL_SUPPORT) || defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
2115*5113495bSYour Name 
2116*5113495bSYour Name static void
2117*5113495bSYour Name ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2118*5113495bSYour Name {
2119*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
2120*5113495bSYour Name 	int i;
2121*5113495bSYour Name 	struct ol_tx_desc_t *tx_desc;
2122*5113495bSYour Name 
2123*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->tx_mutex);
2124*5113495bSYour Name 	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2125*5113495bSYour Name 		tx_desc = ol_tx_desc_find(pdev, i);
2126*5113495bSYour Name 		if (tx_desc->vdev == vdev)
2127*5113495bSYour Name 			tx_desc->vdev = NULL;
2128*5113495bSYour Name 	}
2129*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->tx_mutex);
2130*5113495bSYour Name }
2131*5113495bSYour Name 
2132*5113495bSYour Name #else
2133*5113495bSYour Name #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2134*5113495bSYour Name static void ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2135*5113495bSYour Name {
2136*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
2137*5113495bSYour Name 	struct ol_tx_flow_pool_t *pool;
2138*5113495bSYour Name 	int i;
2139*5113495bSYour Name 	struct ol_tx_desc_t *tx_desc;
2140*5113495bSYour Name 
2141*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
2142*5113495bSYour Name 	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
2143*5113495bSYour Name 		tx_desc = ol_tx_desc_find(pdev, i);
2144*5113495bSYour Name 		if (!qdf_atomic_read(&tx_desc->ref_cnt))
2145*5113495bSYour Name 			/* not in use */
2146*5113495bSYour Name 			continue;
2147*5113495bSYour Name 
2148*5113495bSYour Name 		pool = tx_desc->pool;
2149*5113495bSYour Name 		qdf_spin_lock_bh(&pool->flow_pool_lock);
2150*5113495bSYour Name 		if (tx_desc->vdev == vdev)
2151*5113495bSYour Name 			tx_desc->vdev = NULL;
2152*5113495bSYour Name 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
2153*5113495bSYour Name 	}
2154*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
2155*5113495bSYour Name }
2156*5113495bSYour Name 
2157*5113495bSYour Name #else
2158*5113495bSYour Name static void
2159*5113495bSYour Name ol_txrx_tx_desc_reset_vdev(ol_txrx_vdev_handle vdev)
2160*5113495bSYour Name {
2161*5113495bSYour Name }
2162*5113495bSYour Name #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2163*5113495bSYour Name #endif /* CONFIG_HL_SUPPORT */
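
/*
 * Build-variant summary of ol_txrx_tx_desc_reset_vdev (descriptive):
 * with CONFIG_HL_SUPPORT or QCA_LL_LEGACY_TX_FLOW_CONTROL the whole
 * descriptor pool is walked under pdev->tx_mutex; with
 * QCA_LL_TX_FLOW_CONTROL_V2 each in-use descriptor's flow-pool lock is
 * taken instead; otherwise the reset is not needed and the function is
 * an empty stub.
 */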
2164*5113495bSYour Name 
2165*5113495bSYour Name /**
2166*5113495bSYour Name  * ol_txrx_vdev_detach - Deallocate the specified data virtual
2167*5113495bSYour Name  * device object.
2168*5113495bSYour Name  * @soc_hdl: datapath soc handle
2169*5113495bSYour Name  * @vdev_id: vdev id
2170*5113495bSYour Name  * @callback: function to call (if non-NULL) once the vdev has
2171*5113495bSYour Name  * been wholly deleted
2172*5113495bSYour Name  * @callback_context: context to provide in the callback
2173*5113495bSYour Name  *
2174*5113495bSYour Name  * All peers associated with the virtual device need to be deleted
2175*5113495bSYour Name  * (ol_txrx_peer_detach) before the virtual device itself is deleted.
2176*5113495bSYour Name  * However, for the peers to be fully deleted, the peer deletion has to
2177*5113495bSYour Name  * percolate through the target data FW and back up to the host data SW.
2178*5113495bSYour Name  * Thus, even though the host control SW may have issued a peer_detach
2179*5113495bSYour Name  * call for each of the vdev's peers, the peer objects may still be
2180*5113495bSYour Name  * allocated, pending removal of all references to them by the target FW.
2181*5113495bSYour Name  * In this case, though the vdev_detach function call will still return
2182*5113495bSYour Name  * immediately, the vdev itself won't actually be deleted, until the
2183*5113495bSYour Name  * deletions of all its peers complete.
2184*5113495bSYour Name  * The caller can provide a callback function pointer to be notified when
2185*5113495bSYour Name  * the vdev deletion actually happens - whether it's directly within the
2186*5113495bSYour Name  * vdev_detach call, or if it's deferred until all in-progress peer
2187*5113495bSYour Name  * deletions have completed.
2188*5113495bSYour Name  */
2189*5113495bSYour Name static QDF_STATUS
2190*5113495bSYour Name ol_txrx_vdev_detach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2191*5113495bSYour Name 		    ol_txrx_vdev_delete_cb callback, void *context)
2192*5113495bSYour Name {
2193*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2194*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2195*5113495bSYour Name 								     vdev_id);
2196*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev;
2197*5113495bSYour Name 	ol_txrx_vdev_delete_cb vdev_del_notify;
2198*5113495bSYour Name 	void *vdev_del_context;
2199*5113495bSYour Name 
2200*5113495bSYour Name 	if (qdf_unlikely(!vdev))
2201*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
2202*5113495bSYour Name 
2203*5113495bSYour Name 	/* preconditions */
2204*5113495bSYour Name 	TXRX_ASSERT2(vdev);
2205*5113495bSYour Name 	pdev = vdev->pdev;
2206*5113495bSYour Name 
2207*5113495bSYour Name 	/* prevent anyone from restarting the ll_pause timer again */
2208*5113495bSYour Name 	qdf_atomic_set(&vdev->delete.detaching, 1);
2209*5113495bSYour Name 
2210*5113495bSYour Name 	vdev_del_notify = vdev->vdev_del_notify;
2211*5113495bSYour Name 	vdev_del_context = vdev->osif_dev;
2212*5113495bSYour Name 	ol_txrx_vdev_tx_queue_free(vdev);
2213*5113495bSYour Name 
2214*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->ll_pause.mutex);
2215*5113495bSYour Name 	qdf_timer_stop(&vdev->ll_pause.timer);
2216*5113495bSYour Name 	vdev->ll_pause.is_q_timer_on = false;
2217*5113495bSYour Name 	while (vdev->ll_pause.txq.head) {
2218*5113495bSYour Name 		qdf_nbuf_t next = qdf_nbuf_next(vdev->ll_pause.txq.head);
2219*5113495bSYour Name 
2220*5113495bSYour Name 		qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
2221*5113495bSYour Name 		qdf_nbuf_tx_free(vdev->ll_pause.txq.head, QDF_NBUF_PKT_ERROR);
2222*5113495bSYour Name 		vdev->ll_pause.txq.head = next;
2223*5113495bSYour Name 	}
2224*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
2225*5113495bSYour Name 
2226*5113495bSYour Name 	/* The ll_pause timer must be deleted without any locks held, and
2227*5113495bSYour Name 	 * no timer function can execute after this point, because
2228*5113495bSYour Name 	 * qdf_timer_free deletes the timer synchronously.
2229*5113495bSYour Name 	 */
2230*5113495bSYour Name 	qdf_timer_free(&vdev->ll_pause.timer);
2231*5113495bSYour Name 	qdf_spinlock_destroy(&vdev->ll_pause.mutex);
2232*5113495bSYour Name 
2233*5113495bSYour Name 	qdf_timer_free(&vdev->bundle_queue.timer);
2234*5113495bSYour Name 	qdf_spinlock_destroy(&vdev->bundle_queue.mutex);
2235*5113495bSYour Name 
2236*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->flow_control_lock);
2237*5113495bSYour Name 	vdev->osif_flow_control_cb = NULL;
2238*5113495bSYour Name 	vdev->osif_flow_control_is_pause = NULL;
2239*5113495bSYour Name 	vdev->osif_fc_ctx = NULL;
2240*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->flow_control_lock);
2241*5113495bSYour Name 	qdf_spinlock_destroy(&vdev->flow_control_lock);
2242*5113495bSYour Name 
2243*5113495bSYour Name 	/* remove the vdev from its parent pdev's list */
2244*5113495bSYour Name 	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
2245*5113495bSYour Name 
2246*5113495bSYour Name 	/*
2247*5113495bSYour Name 	 * Use peer_ref_mutex while accessing peer_list, in case
2248*5113495bSYour Name 	 * a peer is in the process of being removed from the list.
2249*5113495bSYour Name 	 */
2250*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
2251*5113495bSYour Name 	/* check that the vdev has no peers allocated */
2252*5113495bSYour Name 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
2253*5113495bSYour Name 		/* debug print - will be removed later */
2254*5113495bSYour Name 		ol_txrx_dbg(
2255*5113495bSYour Name 			   "not deleting vdev object %pK ("QDF_MAC_ADDR_FMT") until deletion finishes for all its peers",
2256*5113495bSYour Name 			   vdev,
2257*5113495bSYour Name 			   QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
2258*5113495bSYour Name 		/* indicate that the vdev needs to be deleted */
2259*5113495bSYour Name 		vdev->delete.pending = 1;
2260*5113495bSYour Name 		vdev->delete.callback = callback;
2261*5113495bSYour Name 		vdev->delete.context = context;
2262*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2263*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
2264*5113495bSYour Name 	}
2265*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2266*5113495bSYour Name 	qdf_event_destroy(&vdev->wait_delete_comp);
2267*5113495bSYour Name 
2268*5113495bSYour Name 	ol_txrx_dbg(
2269*5113495bSYour Name 		   "deleting vdev obj %pK ("QDF_MAC_ADDR_FMT")",
2270*5113495bSYour Name 		   vdev,
2271*5113495bSYour Name 		   QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
2272*5113495bSYour Name 
2273*5113495bSYour Name 	htt_vdev_detach(pdev->htt_pdev, vdev->vdev_id);
2274*5113495bSYour Name 
2275*5113495bSYour Name 	/*
2276*5113495bSYour Name 	 * ol_tx_desc_free might access invalid vdev contents through a
2277*5113495bSYour Name 	 * tx desc, since this vdev might be detached asynchronously in
2278*5113495bSYour Name 	 * another thread.
2279*5113495bSYour Name 	 *
2280*5113495bSYour Name 	 * Go through the tx desc pool and set each matching tx desc's
2281*5113495bSYour Name 	 * vdev to NULL when detaching this vdev; ol_tx_desc_free checks
2282*5113495bSYour Name 	 * the vdev pointer to avoid a crash.
2283*5113495bSYour Name 	 */
2285*5113495bSYour Name 	ol_txrx_tx_desc_reset_vdev(vdev);
2286*5113495bSYour Name 
2287*5113495bSYour Name 	/*
2288*5113495bSYour Name 	 * Doesn't matter if there are outstanding tx frames -
2289*5113495bSYour Name 	 * they will be freed once the target sends a tx completion
2290*5113495bSYour Name 	 * message for them.
2291*5113495bSYour Name 	 */
2292*5113495bSYour Name 	qdf_mem_free(vdev);
2293*5113495bSYour Name 	if (callback)
2294*5113495bSYour Name 		callback(context);
2295*5113495bSYour Name 
2296*5113495bSYour Name 	if (vdev_del_notify)
2297*5113495bSYour Name 		vdev_del_notify(vdev_del_context);
2298*5113495bSYour Name 
2299*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2300*5113495bSYour Name }
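
/*
 * Example (illustrative; shim_vdev_deleted, done and DELETE_TIMEOUT_MS
 * are hypothetical): a caller that must block until the vdev is really
 * gone can pass a completion-style callback, which vdev_detach invokes
 * synchronously when no peers remain, or later from the peer-deletion
 * path once the last peer is freed.
 *
 *	static void shim_vdev_deleted(void *ctx)
 *	{
 *		qdf_event_set((qdf_event_t *)ctx);
 *	}
 *
 *	qdf_event_create(&done);
 *	if (ol_txrx_vdev_detach(soc_hdl, vdev_id,
 *				shim_vdev_deleted, &done) !=
 *	    QDF_STATUS_SUCCESS)
 *		qdf_wait_for_event_completion(&done, DELETE_TIMEOUT_MS);
 */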
2301*5113495bSYour Name 
2302*5113495bSYour Name /**
2303*5113495bSYour Name  * ol_txrx_flush_rx_frames() - flush cached rx frames
2304*5113495bSYour Name  * @peer: peer
2305*5113495bSYour Name  * @drop: set flag to drop frames
2306*5113495bSYour Name  *
2307*5113495bSYour Name  * Return: None
2308*5113495bSYour Name  */
2309*5113495bSYour Name void ol_txrx_flush_rx_frames(struct ol_txrx_peer_t *peer,
2310*5113495bSYour Name 			     bool drop)
2311*5113495bSYour Name {
2312*5113495bSYour Name 	struct ol_txrx_cached_bufq_t *bufqi;
2313*5113495bSYour Name 	struct ol_rx_cached_buf *cache_buf;
2314*5113495bSYour Name 	QDF_STATUS ret;
2315*5113495bSYour Name 	ol_txrx_rx_fp data_rx = NULL;
2316*5113495bSYour Name 
2317*5113495bSYour Name 	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
2318*5113495bSYour Name 		qdf_atomic_dec(&peer->flush_in_progress);
2319*5113495bSYour Name 		return;
2320*5113495bSYour Name 	}
2321*5113495bSYour Name 
2322*5113495bSYour Name 	qdf_assert(peer->vdev);
2323*5113495bSYour Name 	qdf_spin_lock_bh(&peer->peer_info_lock);
2324*5113495bSYour Name 	bufqi = &peer->bufq_info;
2325*5113495bSYour Name 
2326*5113495bSYour Name 	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->rx)
2327*5113495bSYour Name 		data_rx = peer->vdev->rx;
2328*5113495bSYour Name 	else
2329*5113495bSYour Name 		drop = true;
2330*5113495bSYour Name 	qdf_spin_unlock_bh(&peer->peer_info_lock);
2331*5113495bSYour Name 
2332*5113495bSYour Name 	qdf_spin_lock_bh(&bufqi->bufq_lock);
2333*5113495bSYour Name 	cache_buf = list_entry((&bufqi->cached_bufq)->next,
2334*5113495bSYour Name 				typeof(*cache_buf), list);
2335*5113495bSYour Name 	while (!list_empty(&bufqi->cached_bufq)) {
2336*5113495bSYour Name 		list_del(&cache_buf->list);
2337*5113495bSYour Name 		bufqi->curr--;
2338*5113495bSYour Name 		qdf_assert(bufqi->curr >= 0);
2339*5113495bSYour Name 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
2340*5113495bSYour Name 		if (drop) {
2341*5113495bSYour Name 			qdf_nbuf_free(cache_buf->buf);
2342*5113495bSYour Name 		} else {
2343*5113495bSYour Name 			/* Flush the cached frames to HDD */
2344*5113495bSYour Name 			ret = data_rx(peer->vdev->osif_dev, cache_buf->buf);
2345*5113495bSYour Name 			if (ret != QDF_STATUS_SUCCESS)
2346*5113495bSYour Name 				qdf_nbuf_free(cache_buf->buf);
2347*5113495bSYour Name 		}
2348*5113495bSYour Name 		qdf_mem_free(cache_buf);
2349*5113495bSYour Name 		qdf_spin_lock_bh(&bufqi->bufq_lock);
2350*5113495bSYour Name 		cache_buf = list_entry((&bufqi->cached_bufq)->next,
2351*5113495bSYour Name 				typeof(*cache_buf), list);
2352*5113495bSYour Name 	}
2353*5113495bSYour Name 	bufqi->qdepth_no_thresh = bufqi->curr;
2354*5113495bSYour Name 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
2355*5113495bSYour Name 	qdf_atomic_dec(&peer->flush_in_progress);
2356*5113495bSYour Name }
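
/*
 * The flush_in_progress guard above is a single-entry idiom: an
 * inc-return greater than 1 means another thread already owns the
 * flush, so the loser backs out. Minimal sketch of the same pattern
 * (illustrative; do_flush is a hypothetical helper):
 *
 *	if (qdf_atomic_inc_return(&guard) > 1) {
 *		qdf_atomic_dec(&guard);	// lost the race; owner will flush
 *		return;
 *	}
 *	do_flush();
 *	qdf_atomic_dec(&guard);		// release ownership
 */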
2357*5113495bSYour Name 
2358*5113495bSYour Name static void ol_txrx_flush_cache_rx_queue(void)
2359*5113495bSYour Name {
2360*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
2361*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
2362*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
2363*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
2364*5113495bSYour Name 
2365*5113495bSYour Name 	if (qdf_unlikely(!soc))
2366*5113495bSYour Name 		return;
2367*5113495bSYour Name 
2368*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
2369*5113495bSYour Name 	if (!pdev)
2370*5113495bSYour Name 		return;
2371*5113495bSYour Name 
2372*5113495bSYour Name 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
2373*5113495bSYour Name 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
2374*5113495bSYour Name 			ol_txrx_flush_rx_frames(peer, true);
2375*5113495bSYour Name 		}
2376*5113495bSYour Name 	}
2377*5113495bSYour Name }
2378*5113495bSYour Name 
2379*5113495bSYour Name /* Define short name to use in cds_trigger_recovery */
2380*5113495bSYour Name #define PEER_DEL_TIMEOUT QDF_PEER_DELETION_TIMEDOUT
2381*5113495bSYour Name 
2382*5113495bSYour Name /**
2383*5113495bSYour Name  * ol_txrx_dump_peer_access_list() - dump peer access list
2384*5113495bSYour Name  * @peer: peer handle
2385*5113495bSYour Name  *
2386*5113495bSYour Name  * This function dumps which peer debug ids still hold references to the peer
2387*5113495bSYour Name  *
2388*5113495bSYour Name  * Return: None
2389*5113495bSYour Name  */
2390*5113495bSYour Name static void ol_txrx_dump_peer_access_list(ol_txrx_peer_handle peer)
2391*5113495bSYour Name {
2392*5113495bSYour Name 	u32 i;
2393*5113495bSYour Name 	u32 pending_ref;
2394*5113495bSYour Name 
2395*5113495bSYour Name 	for (i = 0; i < PEER_DEBUG_ID_MAX; i++) {
2396*5113495bSYour Name 		pending_ref = qdf_atomic_read(&peer->access_list[i]);
2397*5113495bSYour Name 		if (pending_ref)
2398*5113495bSYour Name 			ol_txrx_info_high("id %d pending refs %d",
2399*5113495bSYour Name 					  i, pending_ref);
2400*5113495bSYour Name 	}
2401*5113495bSYour Name }
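
/*
 * Note: this dump is used on the peer-deletion timeout path (see
 * ol_txrx_peer_attach below), so the log identifies which
 * PEER_DEBUG_ID_* owners still hold references when deletion stalls.
 */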
2402*5113495bSYour Name 
2403*5113495bSYour Name /**
2404*5113495bSYour Name  * ol_txrx_peer_attach - Allocate and set up references for a
2405*5113495bSYour Name  * data peer object.
2406*5113495bSYour Name  * @soc_hdl - data path soc handle
2407*5113495bSYour Name  * @vdev_id - virtual device instance id
2408*5113495bSYour Name  * @peer_mac_addr - MAC address of the new peer
 * @peer_type - type (enum cdp_peer_type) of the new peer
2409*5113495bSYour Name  *
2410*5113495bSYour Name  * When an association with a peer starts, the host's control SW
2411*5113495bSYour Name  * uses this function to inform the host data SW.
2412*5113495bSYour Name  * The host data SW allocates its own peer object, and stores a
2413*5113495bSYour Name  * reference to the control peer object within the data peer object.
2414*5113495bSYour Name  * The host data SW also stores a reference to the virtual device
2415*5113495bSYour Name  * that the peer is associated with.  This virtual device handle is
2416*5113495bSYour Name  * used when the data SW delivers rx data frames to the OS shim layer.
2417*5113495bSYour Name  * The host data SW returns a handle to the new peer data object,
2418*5113495bSYour Name  * so a reference within the control peer object can be set to the
2419*5113495bSYour Name  * data peer object.
2420*5113495bSYour Name  *
2421*5113495bSYour Name  * Return: QDF status code
2422*5113495bSYour Name  */
2423*5113495bSYour Name static QDF_STATUS
2424*5113495bSYour Name ol_txrx_peer_attach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2425*5113495bSYour Name 		    uint8_t *peer_mac_addr, enum cdp_peer_type peer_type)
2426*5113495bSYour Name {
2427*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2428*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2429*5113495bSYour Name 								     vdev_id);
2430*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
2431*5113495bSYour Name 	struct ol_txrx_peer_t *temp_peer;
2432*5113495bSYour Name 	uint8_t i;
2433*5113495bSYour Name 	bool wait_on_deletion = false;
2434*5113495bSYour Name 	QDF_STATUS rc;
2435*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev;
2436*5113495bSYour Name 	bool cmp_wait_mac = false;
2437*5113495bSYour Name 	uint8_t zero_mac_addr[QDF_MAC_ADDR_SIZE] = { 0, 0, 0, 0, 0, 0 };
2438*5113495bSYour Name 	u8 check_valid = 0;
2439*5113495bSYour Name 
2440*5113495bSYour Name 	/* preconditions */
2441*5113495bSYour Name 	TXRX_ASSERT2(vdev);
2442*5113495bSYour Name 	TXRX_ASSERT2(peer_mac_addr);
2443*5113495bSYour Name 
2444*5113495bSYour Name 	pdev = vdev->pdev;
2445*5113495bSYour Name 	TXRX_ASSERT2(pdev);
2446*5113495bSYour Name 
2447*5113495bSYour Name 	if (pdev->enable_peer_unmap_conf_support)
2448*5113495bSYour Name 		check_valid = 1;
2449*5113495bSYour Name 
2450*5113495bSYour Name 	if (qdf_mem_cmp(&zero_mac_addr, &vdev->last_peer_mac_addr,
2451*5113495bSYour Name 				QDF_MAC_ADDR_SIZE))
2452*5113495bSYour Name 		cmp_wait_mac = true;
2453*5113495bSYour Name 
2454*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
2455*5113495bSYour Name 	/* check for duplicate existing peer */
2456*5113495bSYour Name 	TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
2457*5113495bSYour Name 		if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
2458*5113495bSYour Name 			(union ol_txrx_align_mac_addr_t *)peer_mac_addr) &&
2459*5113495bSYour Name 			(check_valid == 0 || temp_peer->valid)) {
2460*5113495bSYour Name 			ol_txrx_info_high(
2461*5113495bSYour Name 				"vdev_id %d ("QDF_MAC_ADDR_FMT") already exists",
2462*5113495bSYour Name 				vdev->vdev_id,
2463*5113495bSYour Name 				QDF_MAC_ADDR_REF(peer_mac_addr));
2464*5113495bSYour Name 			if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2465*5113495bSYour Name 				vdev->wait_on_peer_id = temp_peer->local_id;
2466*5113495bSYour Name 				qdf_event_reset(&vdev->wait_delete_comp);
2467*5113495bSYour Name 				wait_on_deletion = true;
2468*5113495bSYour Name 				break;
2469*5113495bSYour Name 			} else {
2470*5113495bSYour Name 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2471*5113495bSYour Name 				return QDF_STATUS_E_FAILURE;
2472*5113495bSYour Name 			}
2473*5113495bSYour Name 		}
2474*5113495bSYour Name 		if (cmp_wait_mac && !ol_txrx_peer_find_mac_addr_cmp(
2475*5113495bSYour Name 					&temp_peer->mac_addr,
2476*5113495bSYour Name 					&vdev->last_peer_mac_addr) &&
2477*5113495bSYour Name 					(check_valid == 0 ||
2478*5113495bSYour Name 					 temp_peer->valid)) {
2479*5113495bSYour Name 			ol_txrx_info_high(
2480*5113495bSYour Name 				"vdev_id %d ("QDF_MAC_ADDR_FMT") old peer exists",
2481*5113495bSYour Name 				vdev->vdev_id,
2482*5113495bSYour Name 				QDF_MAC_ADDR_REF(vdev->last_peer_mac_addr.raw));
2483*5113495bSYour Name 			if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
2484*5113495bSYour Name 				vdev->wait_on_peer_id = temp_peer->local_id;
2485*5113495bSYour Name 				qdf_event_reset(&vdev->wait_delete_comp);
2486*5113495bSYour Name 				wait_on_deletion = true;
2487*5113495bSYour Name 				break;
2488*5113495bSYour Name 			} else {
2489*5113495bSYour Name 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2490*5113495bSYour Name 				ol_txrx_err("peer not found");
2491*5113495bSYour Name 				return QDF_STATUS_E_FAILURE;
2492*5113495bSYour Name 			}
2493*5113495bSYour Name 		}
2494*5113495bSYour Name 	}
2495*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2496*5113495bSYour Name 
2497*5113495bSYour Name 	qdf_mem_zero(&vdev->last_peer_mac_addr,
2498*5113495bSYour Name 			sizeof(union ol_txrx_align_mac_addr_t));
2499*5113495bSYour Name 	if (wait_on_deletion) {
2500*5113495bSYour Name 		/* wait for peer deletion */
2501*5113495bSYour Name 		rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
2502*5113495bSYour Name 					   PEER_DELETION_TIMEOUT);
2503*5113495bSYour Name 		if (QDF_STATUS_SUCCESS != rc) {
2504*5113495bSYour Name 			ol_txrx_err("error waiting for peer_id(%d) deletion, status %d",
2505*5113495bSYour Name 				    vdev->wait_on_peer_id, (int) rc);
2506*5113495bSYour Name 			/* Added for debugging only */
2507*5113495bSYour Name 			ol_txrx_dump_peer_access_list(temp_peer);
2508*5113495bSYour Name 			wlan_roam_debug_dump_table();
2509*5113495bSYour Name 			vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
2510*5113495bSYour Name 
2511*5113495bSYour Name 			return QDF_STATUS_E_FAILURE;
2512*5113495bSYour Name 		}
2513*5113495bSYour Name 	}
2514*5113495bSYour Name 
2515*5113495bSYour Name 	peer = qdf_mem_malloc(sizeof(*peer));
2516*5113495bSYour Name 	if (!peer)
2517*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;
2518*5113495bSYour Name 
2519*5113495bSYour Name 	/* store provided params */
2520*5113495bSYour Name 	peer->vdev = vdev;
2521*5113495bSYour Name 	qdf_mem_copy(&peer->mac_addr.raw[0], peer_mac_addr,
2522*5113495bSYour Name 		     QDF_MAC_ADDR_SIZE);
2523*5113495bSYour Name 
2524*5113495bSYour Name 	ol_txrx_peer_txqs_init(pdev, peer);
2525*5113495bSYour Name 
2526*5113495bSYour Name 	INIT_LIST_HEAD(&peer->bufq_info.cached_bufq);
2527*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
2528*5113495bSYour Name 	/* add this peer into the vdev's list */
2529*5113495bSYour Name 	TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
2530*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
2531*5113495bSYour Name 	/* check whether this is a real peer (peer mac addr != vdev mac addr) */
2532*5113495bSYour Name 	if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
2533*5113495bSYour Name 		qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
2534*5113495bSYour Name 		vdev->last_real_peer = peer;
2535*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
2536*5113495bSYour Name 	}
2537*5113495bSYour Name 
2538*5113495bSYour Name 	peer->rx_opt_proc = pdev->rx_opt_proc;
2539*5113495bSYour Name 
2540*5113495bSYour Name 	ol_rx_peer_init(pdev, peer);
2541*5113495bSYour Name 
2542*5113495bSYour Name 	/* initialize the peer_id */
2543*5113495bSYour Name 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
2544*5113495bSYour Name 		peer->peer_ids[i] = HTT_INVALID_PEER;
2545*5113495bSYour Name 
2546*5113495bSYour Name 	qdf_spinlock_create(&peer->peer_info_lock);
2547*5113495bSYour Name 	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
2548*5113495bSYour Name 
2549*5113495bSYour Name 	peer->bufq_info.thresh = OL_TXRX_CACHED_BUFQ_THRESH;
2550*5113495bSYour Name 
2551*5113495bSYour Name 	qdf_atomic_init(&peer->delete_in_progress);
2552*5113495bSYour Name 	qdf_atomic_init(&peer->flush_in_progress);
2553*5113495bSYour Name 	qdf_atomic_init(&peer->ref_cnt);
2554*5113495bSYour Name 	qdf_atomic_init(&peer->del_ref_cnt);
2555*5113495bSYour Name 
2556*5113495bSYour Name 	for (i = 0; i < PEER_DEBUG_ID_MAX; i++)
2557*5113495bSYour Name 		qdf_atomic_init(&peer->access_list[i]);
2558*5113495bSYour Name 
2559*5113495bSYour Name 	/* keep one reference for attach */
2560*5113495bSYour Name 	ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
2561*5113495bSYour Name 
2562*5113495bSYour Name 	/* Set a flag to indicate peer create is pending in firmware */
2563*5113495bSYour Name 	qdf_atomic_init(&peer->fw_create_pending);
2564*5113495bSYour Name 	qdf_atomic_set(&peer->fw_create_pending, 1);
2565*5113495bSYour Name 
2566*5113495bSYour Name 	peer->valid = 1;
2567*5113495bSYour Name 	qdf_timer_init(pdev->osdev, &peer->peer_unmap_timer,
2568*5113495bSYour Name 		       peer_unmap_timer_handler, peer, QDF_TIMER_TYPE_SW);
2569*5113495bSYour Name 
2570*5113495bSYour Name 	ol_txrx_peer_find_hash_add(pdev, peer);
2571*5113495bSYour Name 
2572*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
2573*5113495bSYour Name 		   "vdev %pK created peer %pK ref_cnt %d ("QDF_MAC_ADDR_FMT")",
2574*5113495bSYour Name 		   vdev, peer, qdf_atomic_read(&peer->ref_cnt),
2575*5113495bSYour Name 		   QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2576*5113495bSYour Name 	/*
2577*5113495bSYour Name 	 * For every peer map message, search and set if bss_peer
2578*5113495bSYour Name 	 */
2579*5113495bSYour Name 	if (qdf_mem_cmp(peer->mac_addr.raw, vdev->mac_addr.raw,
2580*5113495bSYour Name 				QDF_MAC_ADDR_SIZE))
2581*5113495bSYour Name 		peer->bss_peer = 1;
2582*5113495bSYour Name 
2583*5113495bSYour Name 	/*
2584*5113495bSYour Name 	 * The peer starts in the "disc" state while association is in progress.
2585*5113495bSYour Name 	 * Once association completes, the peer will get updated to "auth" state
2586*5113495bSYour Name 	 * by a call to ol_txrx_peer_state_update if the peer is in open mode,
2587*5113495bSYour Name 	 * or else to the "conn" state. For non-open mode, the peer will
2588*5113495bSYour Name 	 * progress to "auth" state once the authentication completes.
2589*5113495bSYour Name 	 */
2590*5113495bSYour Name 	peer->state = OL_TXRX_PEER_STATE_INVALID;
2591*5113495bSYour Name 	ol_txrx_peer_state_update(soc_hdl, peer->mac_addr.raw,
2592*5113495bSYour Name 				  OL_TXRX_PEER_STATE_DISC);
2593*5113495bSYour Name 
2594*5113495bSYour Name #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
2595*5113495bSYour Name 	peer->rssi_dbm = HTT_RSSI_INVALID;
2596*5113495bSYour Name #endif
2597*5113495bSYour Name 	if ((QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()) &&
2598*5113495bSYour Name 	    !pdev->self_peer) {
2599*5113495bSYour Name 		pdev->self_peer = peer;
2600*5113495bSYour Name 		/*
2601*5113495bSYour Name 		 * No Tx in monitor mode; Tx would trigger a target assert,
2602*5113495bSYour Name 		 * so set disable_intrabss_fwd to true.
2603*5113495bSYour Name 		 */
2604*5113495bSYour Name 		ol_vdev_rx_set_intrabss_fwd(soc_hdl, vdev_id, true);
2605*5113495bSYour Name 	}
2606*5113495bSYour Name 
2607*5113495bSYour Name 	ol_txrx_local_peer_id_alloc(pdev, peer);
2608*5113495bSYour Name 
2609*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2610*5113495bSYour Name }
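
/*
 * Reference-count sketch: every successful peer lookup must be paired
 * with a release under the same debug id (illustrative; use_peer is a
 * hypothetical helper):
 *
 *	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, mac, 0, 1,
 *						PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		use_peer(peer);
 *		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 *
 * The reference taken above with PEER_DEBUG_ID_OL_PEER_ATTACH is the
 * one dropped later by the peer detach path.
 */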
2611*5113495bSYour Name 
2612*5113495bSYour Name #undef PEER_DEL_TIMEOUT
2613*5113495bSYour Name 
2614*5113495bSYour Name /*
2615*5113495bSYour Name  * Discarding tx filter - removes all data frames (disconnected state)
2616*5113495bSYour Name  */
2617*5113495bSYour Name static A_STATUS ol_tx_filter_discard(struct ol_txrx_msdu_info_t *tx_msdu_info)
2618*5113495bSYour Name {
2619*5113495bSYour Name 	return A_ERROR;
2620*5113495bSYour Name }
2621*5113495bSYour Name 
2622*5113495bSYour Name /*
2623*5113495bSYour Name  * Non-authentication tx filter - filters out data frames that are not
2624*5113495bSYour Name  * related to authentication, but allows EAPOL (PAE) or WAPI (WAI)
2625*5113495bSYour Name  * data frames (connected state)
2626*5113495bSYour Name  */
2627*5113495bSYour Name static A_STATUS ol_tx_filter_non_auth(struct ol_txrx_msdu_info_t *tx_msdu_info)
2628*5113495bSYour Name {
2629*5113495bSYour Name 	return
2630*5113495bSYour Name 		(tx_msdu_info->htt.info.ethertype == ETHERTYPE_PAE ||
2631*5113495bSYour Name 		 tx_msdu_info->htt.info.ethertype ==
2632*5113495bSYour Name 		 ETHERTYPE_WAI) ? A_OK : A_ERROR;
2633*5113495bSYour Name }
2634*5113495bSYour Name 
2635*5113495bSYour Name /*
2636*5113495bSYour Name  * Pass-through tx filter - lets all data frames through (authenticated state)
2637*5113495bSYour Name  */
2638*5113495bSYour Name static A_STATUS ol_tx_filter_pass_thru(struct ol_txrx_msdu_info_t *tx_msdu_info)
2639*5113495bSYour Name {
2640*5113495bSYour Name 	return A_OK;
2641*5113495bSYour Name }
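
/*
 * These three filters form a per-peer state machine driven by
 * ol_txrx_peer_state_update() below: "disc" discards everything,
 * "conn" passes only EAPOL (PAE) and WAPI (WAI) handshake frames, and
 * "auth" passes all data frames. Illustrative tx-path use (drop_frame
 * is a hypothetical helper):
 *
 *	if (peer->tx_filter(&tx_msdu_info) != A_OK)
 *		drop_frame(msdu);
 */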
2642*5113495bSYour Name 
2643*5113495bSYour Name /**
2644*5113495bSYour Name  * ol_txrx_peer_get_peer_mac_addr() - return mac_addr from peer handle.
2645*5113495bSYour Name  * @peer: handle to peer
2646*5113495bSYour Name  *
2647*5113495bSYour Name  * Returns the mac address for modules which do not know the peer type
2648*5113495bSYour Name  *
2649*5113495bSYour Name  * Return: the mac_addr from peer
2650*5113495bSYour Name  */
2651*5113495bSYour Name static uint8_t *
2652*5113495bSYour Name ol_txrx_peer_get_peer_mac_addr(void *ppeer)
2653*5113495bSYour Name {
2654*5113495bSYour Name 	ol_txrx_peer_handle peer = ppeer;
2655*5113495bSYour Name 
2656*5113495bSYour Name 	if (!peer)
2657*5113495bSYour Name 		return NULL;
2658*5113495bSYour Name 
2659*5113495bSYour Name 	return peer->mac_addr.raw;
2660*5113495bSYour Name }
2661*5113495bSYour Name 
2662*5113495bSYour Name /**
2663*5113495bSYour Name  * ol_txrx_get_pn_info() - Returns pn info from peer
2664*5113495bSYour Name  * @soc_hdl: soc handle
2665*5113495bSYour Name  * @peer_mac: mac address of the peer
2666*5113495bSYour Name  * @vdev_id: vdev identifier
2667*5113495bSYour Name  * @last_pn_valid: return last_rmf_pn_valid value from peer.
2668*5113495bSYour Name  * @last_pn: return last_rmf_pn value from peer.
2669*5113495bSYour Name  * @rmf_pn_replays: return rmf_pn_replays value from peer.
2670*5113495bSYour Name  *
2671*5113495bSYour Name  * Return: NONE
2672*5113495bSYour Name  */
2673*5113495bSYour Name static void
2674*5113495bSYour Name ol_txrx_get_pn_info(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
2675*5113495bSYour Name 		    uint8_t vdev_id, uint8_t **last_pn_valid,
2676*5113495bSYour Name 		    uint64_t **last_pn, uint32_t **rmf_pn_replays)
2677*5113495bSYour Name {
2678*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2679*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
2680*5113495bSYour Name 	ol_txrx_peer_handle peer;
2681*5113495bSYour Name 
2682*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
2683*5113495bSYour Name 	if (!pdev) {
2684*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
2685*5113495bSYour Name 		return;
2686*5113495bSYour Name 	}
2687*5113495bSYour Name 
2688*5113495bSYour Name 	peer =  ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
2689*5113495bSYour Name 						    PEER_DEBUG_ID_OL_INTERNAL);
2690*5113495bSYour Name 	if (!peer)
2691*5113495bSYour Name 		return;
2692*5113495bSYour Name 
2693*5113495bSYour Name 	*last_pn_valid = &peer->last_rmf_pn_valid;
2694*5113495bSYour Name 	*last_pn = &peer->last_rmf_pn;
2695*5113495bSYour Name 	*rmf_pn_replays = &peer->rmf_pn_replays;
2696*5113495bSYour Name 
2697*5113495bSYour Name 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
2698*5113495bSYour Name }
2699*5113495bSYour Name 
2700*5113495bSYour Name /**
2701*5113495bSYour Name  * ol_txrx_get_opmode() - Return operation mode of vdev
2702*5113495bSYour Name  * @soc_hdl: Datapath soc handle
2703*5113495bSYour Name  * @vdev_id: id of vdev
2704*5113495bSYour Name  *
2705*5113495bSYour Name  * Return: interface opmode if SUCCESS,
2706*5113495bSYour Name  *	   0 if interface does not exist.
2707*5113495bSYour Name  */
2708*5113495bSYour Name static int ol_txrx_get_opmode(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2709*5113495bSYour Name {
2710*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
2711*5113495bSYour Name 
2712*5113495bSYour Name 	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
2713*5113495bSYour Name 	if (!vdev) {
2714*5113495bSYour Name 		ol_txrx_err("vdev for id %d is NULL", vdev_id);
2715*5113495bSYour Name 		return 0;
2716*5113495bSYour Name 	}
2717*5113495bSYour Name 
2718*5113495bSYour Name 	return vdev->opmode;
2719*5113495bSYour Name }
2720*5113495bSYour Name 
2721*5113495bSYour Name /**
2722*5113495bSYour Name  * ol_txrx_get_peer_state() - Return peer state of peer
2723*5113495bSYour Name  * @soc_hdl: datapath soc handle
2724*5113495bSYour Name  * @vdev_id: virtual interface id
2725*5113495bSYour Name  * @peer_mac: peer mac addr
2726*5113495bSYour Name  * @slowpath: called from slow path or not
2727*5113495bSYour Name  *
2728*5113495bSYour Name  * Return: return peer state
2729*5113495bSYour Name  */
2730*5113495bSYour Name static int ol_txrx_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2731*5113495bSYour Name 				  uint8_t *peer_mac, bool slowpath)
2732*5113495bSYour Name {
2733*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2734*5113495bSYour Name 	ol_txrx_pdev_handle pdev =
2735*5113495bSYour Name 		ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
2736*5113495bSYour Name 	ol_txrx_peer_handle peer;
2737*5113495bSYour Name 	enum ol_txrx_peer_state peer_state;
2738*5113495bSYour Name 
2739*5113495bSYour Name 	if (!pdev)
2740*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
2741*5113495bSYour Name 
2742*5113495bSYour Name 	peer =  ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
2743*5113495bSYour Name 						    PEER_DEBUG_ID_OL_INTERNAL);
2744*5113495bSYour Name 	if (!peer)
2745*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
2746*5113495bSYour Name 
2747*5113495bSYour Name 	peer_state = peer->state;
2748*5113495bSYour Name 	/* check the vdev before dropping the ref; peer may be freed after */
2749*5113495bSYour Name 	if (peer->vdev->vdev_id != vdev_id)
2750*5113495bSYour Name 		peer_state = OL_TXRX_PEER_STATE_INVALID;
2751*5113495bSYour Name 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
2752*5113495bSYour Name 
2753*5113495bSYour Name 	return peer_state;
2753*5113495bSYour Name }
2754*5113495bSYour Name 
2755*5113495bSYour Name /**
2756*5113495bSYour Name  * ol_txrx_get_vdev_mac_addr() - Return mac addr of vdev
2757*5113495bSYour Name  * @soc_hdl: datapath soc handle
2758*5113495bSYour Name  * @vdev_id: virtual interface id
2759*5113495bSYour Name  *
2760*5113495bSYour Name  * Return: vdev mac address
2761*5113495bSYour Name  */
2762*5113495bSYour Name static uint8_t *
2763*5113495bSYour Name ol_txrx_get_vdev_mac_addr(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2764*5113495bSYour Name {
2765*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2766*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2767*5113495bSYour Name 								     vdev_id);
2768*5113495bSYour Name 
2769*5113495bSYour Name 	if (!vdev)
2770*5113495bSYour Name 		return NULL;
2771*5113495bSYour Name 
2772*5113495bSYour Name 	return vdev->mac_addr.raw;
2773*5113495bSYour Name }
2774*5113495bSYour Name 
2775*5113495bSYour Name #ifdef currently_unused
2776*5113495bSYour Name /**
2777*5113495bSYour Name  * ol_txrx_get_vdev_struct_mac_addr() - Return handle to struct qdf_mac_addr of
2778*5113495bSYour Name  * vdev
2779*5113495bSYour Name  * @vdev: vdev handle
2780*5113495bSYour Name  *
2781*5113495bSYour Name  * Return: Handle to struct qdf_mac_addr
2782*5113495bSYour Name  */
2783*5113495bSYour Name struct qdf_mac_addr *
2784*5113495bSYour Name ol_txrx_get_vdev_struct_mac_addr(ol_txrx_vdev_handle vdev)
2785*5113495bSYour Name {
2786*5113495bSYour Name 	return (struct qdf_mac_addr *)&(vdev->mac_addr);
2787*5113495bSYour Name }
2788*5113495bSYour Name #endif
2789*5113495bSYour Name 
2790*5113495bSYour Name #ifdef currently_unused
2791*5113495bSYour Name /**
2792*5113495bSYour Name  * ol_txrx_get_pdev_from_vdev() - Return handle to pdev of vdev
2793*5113495bSYour Name  * @vdev: vdev handle
2794*5113495bSYour Name  *
2795*5113495bSYour Name  * Return: Handle to pdev
2796*5113495bSYour Name  */
2797*5113495bSYour Name ol_txrx_pdev_handle ol_txrx_get_pdev_from_vdev(ol_txrx_vdev_handle vdev)
2798*5113495bSYour Name {
2799*5113495bSYour Name 	return vdev->pdev;
2800*5113495bSYour Name }
2801*5113495bSYour Name #endif
2802*5113495bSYour Name 
2803*5113495bSYour Name /**
2804*5113495bSYour Name  * ol_txrx_get_ctrl_pdev_from_vdev() - Return control pdev of vdev
2805*5113495bSYour Name  * @soc_hdl: datapath soc handle
2806*5113495bSYour Name  * @vdev_id: virtual interface id
2807*5113495bSYour Name  *
2808*5113495bSYour Name  * Return: Handle to control pdev
2809*5113495bSYour Name  */
2810*5113495bSYour Name static struct cdp_cfg *
2811*5113495bSYour Name ol_txrx_get_ctrl_pdev_from_vdev(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2812*5113495bSYour Name {
2813*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2814*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2815*5113495bSYour Name 								     vdev_id);
2816*5113495bSYour Name 
2817*5113495bSYour Name 	if (!vdev)
2818*5113495bSYour Name 		return NULL;
2819*5113495bSYour Name 
2820*5113495bSYour Name 	return vdev->pdev->ctrl_pdev;
2821*5113495bSYour Name }
2822*5113495bSYour Name 
2823*5113495bSYour Name /**
2824*5113495bSYour Name  * ol_txrx_is_rx_fwd_disabled() - returns the rx_fwd_disabled status on vdev
2825*5113495bSYour Name  * @vdev: vdev handle
2826*5113495bSYour Name  *
2827*5113495bSYour Name  * Return: Rx Fwd disabled status
2828*5113495bSYour Name  */
2829*5113495bSYour Name static uint8_t
2830*5113495bSYour Name ol_txrx_is_rx_fwd_disabled(struct cdp_vdev *pvdev)
2831*5113495bSYour Name {
2832*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
2833*5113495bSYour Name 	struct txrx_pdev_cfg_t *cfg = (struct txrx_pdev_cfg_t *)
2834*5113495bSYour Name 					vdev->pdev->ctrl_pdev;
2835*5113495bSYour Name 	return cfg->rx_fwd_disabled;
2836*5113495bSYour Name }
2837*5113495bSYour Name 
2838*5113495bSYour Name #ifdef QCA_IBSS_SUPPORT
2839*5113495bSYour Name /**
2840*5113495bSYour Name  * ol_txrx_update_ibss_add_peer_num_of_vdev() - update and return peer num
2841*5113495bSYour Name  * @soc_hdl: datapath soc handle
2842*5113495bSYour Name  * @vdev_id: virtual interface id
2843*5113495bSYour Name  * @peer_num_delta: peer nums to be adjusted
2844*5113495bSYour Name  *
2845*5113495bSYour Name  * Return: -1 for failure or total peer nums after adjustment.
2846*5113495bSYour Name  */
2847*5113495bSYour Name static int16_t
2848*5113495bSYour Name ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_soc_t *soc_hdl,
2849*5113495bSYour Name 					 uint8_t vdev_id,
2850*5113495bSYour Name 					 int16_t peer_num_delta)
2851*5113495bSYour Name {
2852*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2853*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
2854*5113495bSYour Name 								     vdev_id);
2855*5113495bSYour Name 	int16_t new_peer_num;
2856*5113495bSYour Name 
2857*5113495bSYour Name 	if (!vdev)
2858*5113495bSYour Name 		return OL_TXRX_INVALID_NUM_PEERS;
2859*5113495bSYour Name 
2860*5113495bSYour Name 	new_peer_num = vdev->ibss_peer_num + peer_num_delta;
2861*5113495bSYour Name 	if (new_peer_num > MAX_PEERS || new_peer_num < 0)
2862*5113495bSYour Name 		return OL_TXRX_INVALID_NUM_PEERS;
2863*5113495bSYour Name 
2864*5113495bSYour Name 	vdev->ibss_peer_num = new_peer_num;
2865*5113495bSYour Name 
2866*5113495bSYour Name 	return new_peer_num;
2867*5113495bSYour Name }
2868*5113495bSYour Name 
2869*5113495bSYour Name /**
2870*5113495bSYour Name  * ol_txrx_set_ibss_vdev_heart_beat_timer() - Update ibss vdev heart
2871*5113495bSYour Name  * beat timer
2872*5113495bSYour Name  * @soc_hdl: Datapath soc handle
2873*5113495bSYour Name  * @vdev_id: id of vdev
2874*5113495bSYour Name  * @timer_value_sec: new heart beat timer value
2875*5113495bSYour Name  *
2876*5113495bSYour Name  * Return: Old timer value set in vdev.
2877*5113495bSYour Name  */
2878*5113495bSYour Name static uint16_t
2879*5113495bSYour Name ol_txrx_set_ibss_vdev_heart_beat_timer(struct cdp_soc_t *soc_hdl,
2880*5113495bSYour Name 				       uint8_t vdev_id,
2881*5113495bSYour Name 				       uint16_t timer_value_sec)
2882*5113495bSYour Name {
2883*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev =
2884*5113495bSYour Name 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
	uint16_t old_timer_value;

	/* guard against a stale vdev_id; the lookup can fail */
	if (qdf_unlikely(!vdev)) {
		ol_txrx_err("vdev for id %d is NULL", vdev_id);
		return 0;
	}

2885*5113495bSYour Name 	old_timer_value = vdev->ibss_peer_heart_beat_timer;
2886*5113495bSYour Name 
2887*5113495bSYour Name 	vdev->ibss_peer_heart_beat_timer = timer_value_sec;
2888*5113495bSYour Name 
2889*5113495bSYour Name 	return old_timer_value;
2890*5113495bSYour Name }
2891*5113495bSYour Name #else /* !QCA_IBSS_SUPPORT */
2892*5113495bSYour Name static inline int16_t
2893*5113495bSYour Name ol_txrx_update_ibss_add_peer_num_of_vdev(struct cdp_soc_t *soc_hdl,
2894*5113495bSYour Name 					 uint8_t vdev_id,
2895*5113495bSYour Name 					 int16_t peer_num_delta)
2896*5113495bSYour Name {
2897*5113495bSYour Name 	return 0;
2898*5113495bSYour Name }
2899*5113495bSYour Name 
2900*5113495bSYour Name static uint16_t ol_txrx_set_ibss_vdev_heart_beat_timer(
2901*5113495bSYour Name 				struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2902*5113495bSYour Name 				uint16_t timer_value_sec)
2903*5113495bSYour Name {
2904*5113495bSYour Name 	return 0;
2905*5113495bSYour Name }
2906*5113495bSYour Name #endif /* QCA_IBSS_SUPPORT */
2907*5113495bSYour Name 
2908*5113495bSYour Name #ifdef WLAN_FEATURE_DSRC
2909*5113495bSYour Name /**
2910*5113495bSYour Name  * ol_txrx_set_ocb_chan_info() - set OCB channel info to vdev.
2911*5113495bSYour Name  * @soc_hdl: Datapath soc handle
2912*5113495bSYour Name  * @vdev_id: id of vdev
2913*5113495bSYour Name  * @ocb_set_chan: OCB channel information to be set in vdev.
2914*5113495bSYour Name  *
2915*5113495bSYour Name  * Return: NONE
2916*5113495bSYour Name  */
2917*5113495bSYour Name static void
2918*5113495bSYour Name ol_txrx_set_ocb_chan_info(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
2919*5113495bSYour Name 			  struct ol_txrx_ocb_set_chan ocb_set_chan)
2920*5113495bSYour Name {
2921*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev =
2922*5113495bSYour Name 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
2923*5113495bSYour Name 
2924*5113495bSYour Name 	if (qdf_unlikely(!vdev)) {
2925*5113495bSYour Name 		ol_txrx_err("vdev is NULL");
2926*5113495bSYour Name 		return;
2927*5113495bSYour Name 	}
2928*5113495bSYour Name 
2929*5113495bSYour Name 	vdev->ocb_channel_info = ocb_set_chan.ocb_channel_info;
2930*5113495bSYour Name 	vdev->ocb_channel_count = ocb_set_chan.ocb_channel_count;
2931*5113495bSYour Name }
2932*5113495bSYour Name 
2933*5113495bSYour Name /**
2934*5113495bSYour Name  * ol_txrx_get_ocb_chan_info() - return handle to vdev ocb_channel_info
2935*5113495bSYour Name  * @soc_hdl: Datapath soc handle
2936*5113495bSYour Name  * @vdev_id: id of vdev
2937*5113495bSYour Name  *
2938*5113495bSYour Name  * Return: handle to struct ol_txrx_ocb_chan_info
2939*5113495bSYour Name  */
2940*5113495bSYour Name static struct ol_txrx_ocb_chan_info *
2941*5113495bSYour Name ol_txrx_get_ocb_chan_info(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2942*5113495bSYour Name {
2943*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev =
2944*5113495bSYour Name 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
2945*5113495bSYour Name 
2946*5113495bSYour Name 	if (qdf_unlikely(!vdev)) {
2947*5113495bSYour Name 		ol_txrx_err("vdev is NULL");
2948*5113495bSYour Name 		return NULL;
2949*5113495bSYour Name 	}
2950*5113495bSYour Name 
2951*5113495bSYour Name 	return vdev->ocb_channel_info;
2952*5113495bSYour Name }
2953*5113495bSYour Name #endif
2954*5113495bSYour Name 
2955*5113495bSYour Name QDF_STATUS ol_txrx_peer_state_update(struct cdp_soc_t *soc_hdl,
2956*5113495bSYour Name 				     uint8_t *peer_mac,
2957*5113495bSYour Name 				     enum ol_txrx_peer_state state)
2958*5113495bSYour Name {
2959*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
2960*5113495bSYour Name 	ol_txrx_pdev_handle pdev =
2961*5113495bSYour Name 		ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
2962*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
2963*5113495bSYour Name 	int    peer_ref_cnt;
2964*5113495bSYour Name 
2965*5113495bSYour Name 	if (qdf_unlikely(!pdev)) {
2966*5113495bSYour Name 		ol_txrx_err("Pdev is NULL");
2967*5113495bSYour Name 		qdf_assert(0);
2968*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
2969*5113495bSYour Name 	}
2970*5113495bSYour Name 
2971*5113495bSYour Name 	peer =  ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
2972*5113495bSYour Name 						    PEER_DEBUG_ID_OL_INTERNAL);
2973*5113495bSYour Name 	if (!peer) {
2974*5113495bSYour Name 		ol_txrx_err(
2975*5113495bSYour Name 			   "peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
2976*5113495bSYour Name 			   peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
2977*5113495bSYour Name 			   peer_mac[4], peer_mac[5]);
2978*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
2979*5113495bSYour Name 	}
2980*5113495bSYour Name 
2981*5113495bSYour Name 	/* TODO: Should we send WMI command of the connection state? */
2982*5113495bSYour Name 	/* avoid multiple auth state change. */
2983*5113495bSYour Name 	if (peer->state == state) {
2984*5113495bSYour Name #ifdef TXRX_PRINT_VERBOSE_ENABLE
2985*5113495bSYour Name 		ol_txrx_dbg("no state change, returns directly");
2986*5113495bSYour Name #endif
2987*5113495bSYour Name 		peer_ref_cnt = ol_txrx_peer_release_ref
2988*5113495bSYour Name 						(peer,
2989*5113495bSYour Name 						 PEER_DEBUG_ID_OL_INTERNAL);
2990*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
2991*5113495bSYour Name 	}
2992*5113495bSYour Name 
2993*5113495bSYour Name 	ol_txrx_dbg("change from %d to %d",
2994*5113495bSYour Name 		    peer->state, state);
2995*5113495bSYour Name 
2996*5113495bSYour Name 	peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
2997*5113495bSYour Name 		? ol_tx_filter_pass_thru
2998*5113495bSYour Name 		: ((state == OL_TXRX_PEER_STATE_CONN)
2999*5113495bSYour Name 		   ? ol_tx_filter_non_auth
3000*5113495bSYour Name 		   : ol_tx_filter_discard);
3001*5113495bSYour Name 
3002*5113495bSYour Name 	if (peer->vdev->pdev->cfg.host_addba) {
3003*5113495bSYour Name 		if (state == OL_TXRX_PEER_STATE_AUTH) {
3004*5113495bSYour Name 			int tid;
3005*5113495bSYour Name 			/*
3006*5113495bSYour Name 			 * Pause all regular (non-extended) TID tx queues until
3007*5113495bSYour Name 			 * data arrives and ADDBA negotiation has completed.
3008*5113495bSYour Name 			 */
3009*5113495bSYour Name 			ol_txrx_dbg("pause peer and unpause mgmt/non-qos");
3010*5113495bSYour Name 			ol_txrx_peer_pause(peer); /* pause all tx queues */
3011*5113495bSYour Name 			/* unpause mgmt and non-QoS tx queues */
3012*5113495bSYour Name 			for (tid = OL_TX_NUM_QOS_TIDS;
3013*5113495bSYour Name 			     tid < OL_TX_NUM_TIDS; tid++)
3014*5113495bSYour Name 				ol_txrx_peer_tid_unpause(peer, tid);
3015*5113495bSYour Name 		}
3016*5113495bSYour Name 	}
3017*5113495bSYour Name 	peer_ref_cnt = ol_txrx_peer_release_ref(peer,
3018*5113495bSYour Name 						PEER_DEBUG_ID_OL_INTERNAL);
3019*5113495bSYour Name 	/*
3020*5113495bSYour Name 	 * after ol_txrx_peer_release_ref, peer object cannot be accessed
3021*5113495bSYour Name 	 * if the return code was 0
3022*5113495bSYour Name 	 */
3023*5113495bSYour Name 	if (peer_ref_cnt > 0)
3024*5113495bSYour Name 		/*
3025*5113495bSYour Name 		 * Set the state after the pause to avoid the race condition
3026*5113495bSYour Name 		 * with the ADDBA check in the tx path
3027*5113495bSYour Name 		 */
3028*5113495bSYour Name 		peer->state = state;
3029*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
3030*5113495bSYour Name }
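
/*
 * Typical control-path progression for a secured association
 * (illustrative; the actual call sites live in the control SW):
 *
 *	ol_txrx_peer_state_update(soc, mac, OL_TXRX_PEER_STATE_DISC);
 *						// attach: discard filter
 *	ol_txrx_peer_state_update(soc, mac, OL_TXRX_PEER_STATE_CONN);
 *						// assoc done: EAPOL/WAPI only
 *	ol_txrx_peer_state_update(soc, mac, OL_TXRX_PEER_STATE_AUTH);
 *						// keys installed: pass-through
 */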
3031*5113495bSYour Name 
3032*5113495bSYour Name void
3033*5113495bSYour Name ol_txrx_peer_keyinstalled_state_update(struct ol_txrx_peer_t *peer, uint8_t val)
3034*5113495bSYour Name {
3035*5113495bSYour Name 	peer->keyinstalled = val;
3036*5113495bSYour Name }
3037*5113495bSYour Name 
3038*5113495bSYour Name void
3039*5113495bSYour Name ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
3040*5113495bSYour Name 		    uint8_t *peer_mac,
3041*5113495bSYour Name 		    union ol_txrx_peer_update_param_t *param,
3042*5113495bSYour Name 		    enum ol_txrx_peer_update_select_t select)
3043*5113495bSYour Name {
3044*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
3045*5113495bSYour Name 
3046*5113495bSYour Name 	peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
3047*5113495bSYour Name 						   PEER_DEBUG_ID_OL_INTERNAL);
3048*5113495bSYour Name 	if (!peer) {
3049*5113495bSYour Name 		ol_txrx_dbg("peer is null");
3050*5113495bSYour Name 		return;
3051*5113495bSYour Name 	}
3052*5113495bSYour Name 
3053*5113495bSYour Name 	switch (select) {
3054*5113495bSYour Name 	case ol_txrx_peer_update_qos_capable:
3055*5113495bSYour Name 	{
3056*5113495bSYour Name 		/* save qos_capable in the txrx peer here;
3057*5113495bSYour Name 		 * it is saved again when HTT_ISOC_T2H_MSG_TYPE_PEER_INFO arrives.
3058*5113495bSYour Name 		 */
3059*5113495bSYour Name 		peer->qos_capable = param->qos_capable;
3060*5113495bSYour Name 		/*
3061*5113495bSYour Name 		 * The following function call assumes that the peer has a
3062*5113495bSYour Name 		 * single ID. This is currently true, and
3063*5113495bSYour Name 		 * is expected to remain true.
3064*5113495bSYour Name 		 */
3065*5113495bSYour Name 		htt_peer_qos_update(peer->vdev->pdev->htt_pdev,
3066*5113495bSYour Name 				    peer->peer_ids[0],
3067*5113495bSYour Name 				    peer->qos_capable);
3068*5113495bSYour Name 		break;
3069*5113495bSYour Name 	}
3070*5113495bSYour Name 	case ol_txrx_peer_update_uapsdMask:
3071*5113495bSYour Name 	{
3072*5113495bSYour Name 		peer->uapsd_mask = param->uapsd_mask;
3073*5113495bSYour Name 		htt_peer_uapsdmask_update(peer->vdev->pdev->htt_pdev,
3074*5113495bSYour Name 					  peer->peer_ids[0],
3075*5113495bSYour Name 					  peer->uapsd_mask);
3076*5113495bSYour Name 		break;
3077*5113495bSYour Name 	}
3078*5113495bSYour Name 	case ol_txrx_peer_update_peer_security:
3079*5113495bSYour Name 	{
3080*5113495bSYour Name 		enum ol_sec_type sec_type = param->sec_type;
3081*5113495bSYour Name 		enum htt_sec_type peer_sec_type = htt_sec_type_none;
3082*5113495bSYour Name 
3083*5113495bSYour Name 		switch (sec_type) {
3084*5113495bSYour Name 		case ol_sec_type_none:
3085*5113495bSYour Name 			peer_sec_type = htt_sec_type_none;
3086*5113495bSYour Name 			break;
3087*5113495bSYour Name 		case ol_sec_type_wep128:
3088*5113495bSYour Name 			peer_sec_type = htt_sec_type_wep128;
3089*5113495bSYour Name 			break;
3090*5113495bSYour Name 		case ol_sec_type_wep104:
3091*5113495bSYour Name 			peer_sec_type = htt_sec_type_wep104;
3092*5113495bSYour Name 			break;
3093*5113495bSYour Name 		case ol_sec_type_wep40:
3094*5113495bSYour Name 			peer_sec_type = htt_sec_type_wep40;
3095*5113495bSYour Name 			break;
3096*5113495bSYour Name 		case ol_sec_type_tkip:
3097*5113495bSYour Name 			peer_sec_type = htt_sec_type_tkip;
3098*5113495bSYour Name 			break;
3099*5113495bSYour Name 		case ol_sec_type_tkip_nomic:
3100*5113495bSYour Name 			peer_sec_type = htt_sec_type_tkip_nomic;
3101*5113495bSYour Name 			break;
3102*5113495bSYour Name 		case ol_sec_type_aes_ccmp:
3103*5113495bSYour Name 			peer_sec_type = htt_sec_type_aes_ccmp;
3104*5113495bSYour Name 			break;
3105*5113495bSYour Name 		case ol_sec_type_wapi:
3106*5113495bSYour Name 			peer_sec_type = htt_sec_type_wapi;
3107*5113495bSYour Name 			break;
3108*5113495bSYour Name 		default:
3109*5113495bSYour Name 			peer_sec_type = htt_sec_type_none;
3110*5113495bSYour Name 			break;
3111*5113495bSYour Name 		}
3112*5113495bSYour Name 
3113*5113495bSYour Name 		peer->security[txrx_sec_ucast].sec_type =
3114*5113495bSYour Name 			peer->security[txrx_sec_mcast].sec_type =
3115*5113495bSYour Name 				peer_sec_type;
3116*5113495bSYour Name 
3117*5113495bSYour Name 		break;
3118*5113495bSYour Name 	}
3119*5113495bSYour Name 	default:
3120*5113495bSYour Name 	{
3121*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3122*5113495bSYour Name 			  "ERROR: unknown param %d in %s", select,
3123*5113495bSYour Name 			  __func__);
3124*5113495bSYour Name 		break;
3125*5113495bSYour Name 	}
3126*5113495bSYour Name 	} /* switch */
3127*5113495bSYour Name 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
3128*5113495bSYour Name }
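
/*
 * Usage sketch for the update helper above (hypothetical caller; the
 * wrapper is assumed to be ol_txrx_peer_update, and vdev/peer_mac are
 * placeholders):
 *
 *	union ol_txrx_peer_update_param_t param;
 *
 *	param.qos_capable = 1;
 *	ol_txrx_peer_update(vdev, peer_mac, &param,
 *			    ol_txrx_peer_update_qos_capable);
 *
 * The helper takes and drops its own peer reference internally, so the
 * caller need not hold one across the call.
 */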
3129*5113495bSYour Name 
3130*5113495bSYour Name uint8_t
3131*5113495bSYour Name ol_txrx_peer_uapsdmask_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3132*5113495bSYour Name {
3133*5113495bSYour Name 
3134*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
3135*5113495bSYour Name 
3136*5113495bSYour Name 	peer = ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3137*5113495bSYour Name 	if (peer)
3138*5113495bSYour Name 		return peer->uapsd_mask;
3139*5113495bSYour Name 	return 0;
3140*5113495bSYour Name }
3141*5113495bSYour Name 
3142*5113495bSYour Name uint8_t
3143*5113495bSYour Name ol_txrx_peer_qoscapable_get(struct ol_txrx_pdev_t *txrx_pdev, uint16_t peer_id)
3144*5113495bSYour Name {
3145*5113495bSYour Name 
3146*5113495bSYour Name 	struct ol_txrx_peer_t *peer_t =
3147*5113495bSYour Name 		ol_txrx_peer_find_by_id(txrx_pdev, peer_id);
3148*5113495bSYour Name 	if (peer_t)
3149*5113495bSYour Name 		return peer_t->qos_capable;
3150*5113495bSYour Name 	return 0;
3151*5113495bSYour Name }
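
/*
 * Both getters above resolve the peer by ID without taking a reference,
 * so they are only safe while the caller otherwise guarantees the peer
 * stays alive. A minimal sketch (hypothetical caller):
 *
 *	uint8_t mask = ol_txrx_peer_uapsdmask_get(pdev, peer_id);
 *	uint8_t qos = ol_txrx_peer_qoscapable_get(pdev, peer_id);
 *
 * Both return 0 when peer_id does not map to a live peer.
 */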
3152*5113495bSYour Name 
3153*5113495bSYour Name /**
3154*5113495bSYour Name  * ol_txrx_peer_free_tids() - free tids for the peer
3155*5113495bSYour Name  * @peer: peer handle
3156*5113495bSYour Name  *
3157*5113495bSYour Name  * Return: None
3158*5113495bSYour Name  */
3159*5113495bSYour Name static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
3160*5113495bSYour Name {
3161*5113495bSYour Name 	int i = 0;
3162*5113495bSYour Name 	/*
3163*5113495bSYour Name 	 * 'array' is allocated in addba handler and is supposed to be
3164*5113495bSYour Name 	 * freed in delba handler. There is the case (for example, in
3165*5113495bSYour Name 	 * SSR) where delba handler is not called. Because array points
3166*5113495bSYour Name 	 * to address of 'base' by default and is reallocated in addba
3167*5113495bSYour Name 	 * handler later, only free the memory when the array does not
3168*5113495bSYour Name 	 * point to base.
3169*5113495bSYour Name 	 */
3170*5113495bSYour Name 	for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
3171*5113495bSYour Name 		if (peer->tids_rx_reorder[i].array !=
3172*5113495bSYour Name 		    &peer->tids_rx_reorder[i].base) {
3173*5113495bSYour Name 			ol_txrx_dbg("delete reorder arr, tid:%d", i);
3174*5113495bSYour Name 			qdf_mem_free(peer->tids_rx_reorder[i].array);
3175*5113495bSYour Name 			ol_rx_reorder_init(&peer->tids_rx_reorder[i],
3176*5113495bSYour Name 					   (uint8_t)i);
3177*5113495bSYour Name 		}
3178*5113495bSYour Name 	}
3179*5113495bSYour Name }
3180*5113495bSYour Name 
3181*5113495bSYour Name /**
3182*5113495bSYour Name  * ol_txrx_peer_drop_pending_frames() - drop pending frames in the RX queue
3183*5113495bSYour Name  * @peer: peer handle
3184*5113495bSYour Name  *
3185*5113495bSYour Name  * Drop pending packets pertaining to the peer from the RX thread queue.
3186*5113495bSYour Name  *
3187*5113495bSYour Name  * Return: None
3188*5113495bSYour Name  */
3189*5113495bSYour Name static void ol_txrx_peer_drop_pending_frames(struct ol_txrx_peer_t *peer)
3190*5113495bSYour Name {
3191*5113495bSYour Name 	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3192*5113495bSYour Name 
3193*5113495bSYour Name 	if (sched_ctx)
3194*5113495bSYour Name 		cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3195*5113495bSYour Name }
3196*5113495bSYour Name 
3197*5113495bSYour Name /**
3198*5113495bSYour Name  * ol_txrx_peer_release_ref() - release peer reference
3199*5113495bSYour Name  * @peer: peer handle
3200*5113495bSYour Name  *
3201*5113495bSYour Name  * Release peer reference and delete peer if refcount is 0
3202*5113495bSYour Name  *
3203*5113495bSYour Name  * Return: Resulting peer ref_cnt after this function is invoked
3204*5113495bSYour Name  */
3205*5113495bSYour Name int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
3206*5113495bSYour Name 			     enum peer_debug_id_type debug_id)
3207*5113495bSYour Name {
3208*5113495bSYour Name 	int    rc;
3209*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
3210*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev;
3211*5113495bSYour Name 	bool ref_silent = false;
3212*5113495bSYour Name 	int access_list = 0;
3213*5113495bSYour Name 	uint32_t err_code = 0;
3214*5113495bSYour Name 	int del_rc;
3215*5113495bSYour Name 
3216*5113495bSYour Name 	/* preconditions */
3217*5113495bSYour Name 	TXRX_ASSERT2(peer);
3218*5113495bSYour Name 
3219*5113495bSYour Name 	vdev = peer->vdev;
3220*5113495bSYour Name 	if (!vdev) {
3221*5113495bSYour Name 		ol_txrx_err("The vdev is not present anymore");
3222*5113495bSYour Name 		return -EINVAL;
3223*5113495bSYour Name 	}
3224*5113495bSYour Name 
3225*5113495bSYour Name 	pdev = vdev->pdev;
3226*5113495bSYour Name 	if (!pdev) {
3227*5113495bSYour Name 		ol_txrx_err("The pdev is not present anymore");
3228*5113495bSYour Name 		err_code = 0xbad2;
3229*5113495bSYour Name 		goto ERR_STATE;
3230*5113495bSYour Name 	}
3231*5113495bSYour Name 
3232*5113495bSYour Name 	if (debug_id >= PEER_DEBUG_ID_MAX || debug_id < 0) {
3233*5113495bSYour Name 		ol_txrx_err("incorrect debug_id %d ", debug_id);
3234*5113495bSYour Name 		err_code = 0xbad3;
3235*5113495bSYour Name 		goto ERR_STATE;
3236*5113495bSYour Name 	}
3237*5113495bSYour Name 
3238*5113495bSYour Name 	if (debug_id == PEER_DEBUG_ID_OL_RX_THREAD)
3239*5113495bSYour Name 		ref_silent = true;
3240*5113495bSYour Name 
3241*5113495bSYour Name 	if (!ref_silent)
3242*5113495bSYour Name 		wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3243*5113495bSYour Name 				    DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3244*5113495bSYour Name 				    peer, 0xdead,
3245*5113495bSYour Name 				    qdf_atomic_read(&peer->ref_cnt));
3246*5113495bSYour Name 
3247*5113495bSYour Name 
3248*5113495bSYour Name 	/*
3249*5113495bSYour Name 	 * Hold the lock all the way from checking if the peer ref count
3250*5113495bSYour Name 	 * is zero until the peer references are removed from the hash
3251*5113495bSYour Name 	 * table and vdev list (if the peer ref count is zero).
3252*5113495bSYour Name 	 * This protects against a new HL tx operation starting to use the
3253*5113495bSYour Name 	 * peer object just after this function concludes it's done being used.
3254*5113495bSYour Name 	 * Furthermore, the lock needs to be held while checking whether the
3255*5113495bSYour Name 	 * vdev's list of peers is empty, to make sure that list is not modified
3256*5113495bSYour Name 	 * concurrently with the empty check.
3257*5113495bSYour Name 	 */
3258*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
3259*5113495bSYour Name 
3260*5113495bSYour Name 	/*
3261*5113495bSYour Name 	 * Check the reference count before deleting the peer,
3262*5113495bSYour Name 	 * since this function has been observed to be re-entered,
3263*5113495bSYour Name 	 * leading to deadlock.
3264*5113495bSYour Name 	 * (A double-free should never happen, so assert if it does.)
3265*5113495bSYour Name 	 */
3266*5113495bSYour Name 	rc = qdf_atomic_read(&(peer->ref_cnt));
3267*5113495bSYour Name 
3268*5113495bSYour Name 	if (rc == 0) {
3269*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3270*5113495bSYour Name 		ol_txrx_err("The Peer is not present anymore");
3271*5113495bSYour Name 		qdf_assert(0);
3272*5113495bSYour Name 		return -EACCES;
3273*5113495bSYour Name 	}
3274*5113495bSYour Name 	/*
3275*5113495bSYour Name 	 * now decrement rc; this will be the return code.
3276*5113495bSYour Name 	 * 0 : peer deleted
3277*5113495bSYour Name 	 * >0: peer ref removed, but still has other references
3278*5113495bSYour Name 	 * <0: sanity failed - no changes to the state of the peer
3279*5113495bSYour Name 	 */
3280*5113495bSYour Name 	rc--;
3281*5113495bSYour Name 
3282*5113495bSYour Name 	if (!qdf_atomic_read(&peer->access_list[debug_id])) {
3283*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3284*5113495bSYour Name 		ol_txrx_err("peer %pK ref was not taken by %d",
3285*5113495bSYour Name 			    peer, debug_id);
3286*5113495bSYour Name 		ol_txrx_dump_peer_access_list(peer);
3287*5113495bSYour Name 		QDF_BUG(0);
3288*5113495bSYour Name 		return -EACCES;
3289*5113495bSYour Name 	}
3290*5113495bSYour Name 	qdf_atomic_dec(&peer->access_list[debug_id]);
3291*5113495bSYour Name 
3292*5113495bSYour Name 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
3293*5113495bSYour Name 		u16 peer_id;
3294*5113495bSYour Name 		wlan_roam_debug_log(vdev->vdev_id,
3295*5113495bSYour Name 				    DEBUG_DELETING_PEER_OBJ,
3296*5113495bSYour Name 				    DEBUG_INVALID_PEER_ID,
3297*5113495bSYour Name 				    &peer->mac_addr.raw, peer, 0,
3298*5113495bSYour Name 				    qdf_atomic_read(&peer->ref_cnt));
3299*5113495bSYour Name 		peer_id = peer->local_id;
3300*5113495bSYour Name 
3301*5113495bSYour Name 		/* Drop all pending frames in the rx thread queue */
3302*5113495bSYour Name 		ol_txrx_peer_drop_pending_frames(peer);
3303*5113495bSYour Name 
3304*5113495bSYour Name 		/* remove the reference to the peer from the hash table */
3305*5113495bSYour Name 		ol_txrx_peer_find_hash_remove(pdev, peer);
3306*5113495bSYour Name 
3307*5113495bSYour Name 		/* remove the peer from its parent vdev's list */
3308*5113495bSYour Name 		TAILQ_REMOVE(&peer->vdev->peer_list, peer, peer_list_elem);
3309*5113495bSYour Name 
3310*5113495bSYour Name 		/* cleanup the Rx reorder queues for this peer */
3311*5113495bSYour Name 		ol_rx_peer_cleanup(vdev, peer);
3312*5113495bSYour Name 
3313*5113495bSYour Name 		qdf_spinlock_destroy(&peer->peer_info_lock);
3314*5113495bSYour Name 		qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
3315*5113495bSYour Name 
3316*5113495bSYour Name 		/* peer is removed from peer_list */
3317*5113495bSYour Name 		qdf_atomic_set(&peer->delete_in_progress, 0);
3318*5113495bSYour Name 
3319*5113495bSYour Name 		/*
3320*5113495bSYour Name 		 * Set wait_delete_comp event if the current peer id matches
3321*5113495bSYour Name 		 * with registered peer id.
3322*5113495bSYour Name 		 */
3323*5113495bSYour Name 		if (peer_id == vdev->wait_on_peer_id) {
3324*5113495bSYour Name 			qdf_event_set(&vdev->wait_delete_comp);
3325*5113495bSYour Name 			vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
3326*5113495bSYour Name 		}
3327*5113495bSYour Name 
3328*5113495bSYour Name 		qdf_timer_sync_cancel(&peer->peer_unmap_timer);
3329*5113495bSYour Name 		qdf_timer_free(&peer->peer_unmap_timer);
3330*5113495bSYour Name 
3331*5113495bSYour Name 		/* check whether the parent vdev has no peers left */
3332*5113495bSYour Name 		if (TAILQ_EMPTY(&vdev->peer_list)) {
3333*5113495bSYour Name 			/*
3334*5113495bSYour Name 			 * Check if the parent vdev was waiting for its peers
3335*5113495bSYour Name 			 * to be deleted, in order for it to be deleted too.
3336*5113495bSYour Name 			 */
3337*5113495bSYour Name 			if (vdev->delete.pending) {
3338*5113495bSYour Name 				ol_txrx_vdev_delete_cb vdev_delete_cb =
3339*5113495bSYour Name 					vdev->delete.callback;
3340*5113495bSYour Name 				void *vdev_delete_context =
3341*5113495bSYour Name 					vdev->delete.context;
3342*5113495bSYour Name 				ol_txrx_vdev_delete_cb vdev_del_notify =
3343*5113495bSYour Name 						vdev->vdev_del_notify;
3344*5113495bSYour Name 				void *vdev_del_context = vdev->osif_dev;
3345*5113495bSYour Name 				/*
3346*5113495bSYour Name 				 * Now that there are no references to the peer,
3347*5113495bSYour Name 				 * we can release the peer reference lock.
3348*5113495bSYour Name 				 */
3349*5113495bSYour Name 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3350*5113495bSYour Name 
3351*5113495bSYour Name 				/*
3352*5113495bSYour Name 				 * ol_tx_desc_free might access invalid
3353*5113495bSYour Name 				 * vdev content through a tx desc, since
3354*5113495bSYour Name 				 * this vdev might be detached
3355*5113495bSYour Name 				 * asynchronously by another thread.
3356*5113495bSYour Name 				 *
3357*5113495bSYour Name 				 * Go through tx desc pool to set corresponding
3358*5113495bSYour Name 				 * tx desc's vdev to NULL when detach this vdev,
3359*5113495bSYour Name 				 * and add vdev checking in the ol_tx_desc_free
3360*5113495bSYour Name 				 * to avoid crash.
3361*5113495bSYour Name 				 */
3362*5113495bSYour Name 				ol_txrx_tx_desc_reset_vdev(vdev);
3363*5113495bSYour Name 				ol_txrx_dbg(
3364*5113495bSYour Name 					"deleting vdev object %pK ("QDF_MAC_ADDR_FMT") - its last peer is done",
3365*5113495bSYour Name 					vdev,
3366*5113495bSYour Name 					QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
3367*5113495bSYour Name 				/* all peers are gone, go ahead and delete it */
3368*5113495bSYour Name 				qdf_mem_free(vdev);
3369*5113495bSYour Name 				if (vdev_delete_cb)
3370*5113495bSYour Name 					vdev_delete_cb(vdev_delete_context);
3371*5113495bSYour Name 
3372*5113495bSYour Name 				if (vdev_del_notify)
3373*5113495bSYour Name 					vdev_del_notify(vdev_del_context);
3374*5113495bSYour Name 			} else {
3375*5113495bSYour Name 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3376*5113495bSYour Name 			}
3377*5113495bSYour Name 		} else {
3378*5113495bSYour Name 			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3379*5113495bSYour Name 		}
3380*5113495bSYour Name 
3381*5113495bSYour Name 		del_rc = qdf_atomic_read(&peer->del_ref_cnt);
3382*5113495bSYour Name 
3383*5113495bSYour Name 		ol_txrx_info_high("[%d][%d]: Deleting peer %pK ref_cnt -> %d del_ref_cnt -> %d %s",
3384*5113495bSYour Name 				  debug_id,
3385*5113495bSYour Name 				  qdf_atomic_read(&peer->access_list[debug_id]),
3386*5113495bSYour Name 				  peer, rc, del_rc,
3387*5113495bSYour Name 				  qdf_atomic_read(&peer->fw_create_pending) ==
3388*5113495bSYour Name 				  1 ? "(No Maps received)" : "");
3389*5113495bSYour Name 
3390*5113495bSYour Name 		ol_txrx_peer_tx_queue_free(pdev, peer);
3391*5113495bSYour Name 
3392*5113495bSYour Name 		/* Remove mappings from peer_id to peer object */
3393*5113495bSYour Name 		ol_txrx_peer_clear_map_peer(pdev, peer);
3394*5113495bSYour Name 
3395*5113495bSYour Name 		/* Remove peer pointer from local peer ID map */
3396*5113495bSYour Name 		ol_txrx_local_peer_id_free(pdev, peer);
3397*5113495bSYour Name 
3398*5113495bSYour Name 		ol_txrx_peer_free_tids(peer);
3399*5113495bSYour Name 
3400*5113495bSYour Name 		ol_txrx_dump_peer_access_list(peer);
3401*5113495bSYour Name 
3402*5113495bSYour Name 		if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam() &&
3403*5113495bSYour Name 		    pdev->self_peer == peer)
3404*5113495bSYour Name 			pdev->self_peer = NULL;
3405*5113495bSYour Name 
3406*5113495bSYour Name 		if (!del_rc)
3407*5113495bSYour Name 			qdf_mem_free(peer);
3408*5113495bSYour Name 	} else {
3409*5113495bSYour Name 		access_list = qdf_atomic_read(&peer->access_list[debug_id]);
3410*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
3411*5113495bSYour Name 		if (!ref_silent)
3412*5113495bSYour Name 			ol_txrx_info_high("[%d][%d]: ref delete peer %pK ref_cnt -> %d",
3413*5113495bSYour Name 					  debug_id, access_list, peer, rc);
3414*5113495bSYour Name 	}
3415*5113495bSYour Name 	return rc;
3416*5113495bSYour Name ERR_STATE:
3417*5113495bSYour Name 	wlan_roam_debug_log(vdev->vdev_id, DEBUG_PEER_UNREF_DELETE,
3418*5113495bSYour Name 			    DEBUG_INVALID_PEER_ID, &peer->mac_addr.raw,
3419*5113495bSYour Name 			    peer, err_code, qdf_atomic_read(&peer->ref_cnt));
3420*5113495bSYour Name 	return -EINVAL;
3421*5113495bSYour Name }
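
/*
 * Reference discipline sketch: every get-ref must be paired with a
 * release under the same debug_id so the per-id access_list counters
 * stay balanced (hypothetical caller):
 *
 *	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, mac, 0, 1,
 *						   PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		... use the peer under this reference ...
 *		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 *
 * A return of 0 means this release freed the peer; a positive value is
 * the number of references still outstanding.
 */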
3422*5113495bSYour Name 
3423*5113495bSYour Name /**
3424*5113495bSYour Name  * ol_txrx_clear_peer_internal() - ol internal function to clear peer
3425*5113495bSYour Name  * @peer: pointer to ol txrx peer structure
3426*5113495bSYour Name  *
3427*5113495bSYour Name  * Return: QDF Status
3428*5113495bSYour Name  */
3429*5113495bSYour Name static QDF_STATUS
3430*5113495bSYour Name ol_txrx_clear_peer_internal(struct ol_txrx_peer_t *peer)
3431*5113495bSYour Name {
3432*5113495bSYour Name 	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
3433*5113495bSYour Name 	/* Drop pending Rx frames in CDS */
3434*5113495bSYour Name 	if (sched_ctx)
3435*5113495bSYour Name 		cds_drop_rxpkt_by_staid(sched_ctx, peer->local_id);
3436*5113495bSYour Name 
3437*5113495bSYour Name 	/* Purge the cached rx frame queue */
3438*5113495bSYour Name 	ol_txrx_flush_rx_frames(peer, 1);
3439*5113495bSYour Name 
3440*5113495bSYour Name 	qdf_spin_lock_bh(&peer->peer_info_lock);
3441*5113495bSYour Name 	peer->state = OL_TXRX_PEER_STATE_DISC;
3442*5113495bSYour Name 	qdf_spin_unlock_bh(&peer->peer_info_lock);
3443*5113495bSYour Name 
3444*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
3445*5113495bSYour Name }
3446*5113495bSYour Name 
3447*5113495bSYour Name /**
3448*5113495bSYour Name  * ol_txrx_clear_peer() - clear peer
3449*5113495bSYour Name  * @peer_addr: peer mac address
3450*5113495bSYour Name  *
3451*5113495bSYour Name  * Return: QDF Status
3452*5113495bSYour Name  */
3453*5113495bSYour Name static QDF_STATUS
3454*5113495bSYour Name ol_txrx_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3455*5113495bSYour Name 		   struct qdf_mac_addr peer_addr)
3456*5113495bSYour Name {
3457*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3458*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev =
3459*5113495bSYour Name 			ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
3460*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
3461*5113495bSYour Name 	QDF_STATUS status;
3462*5113495bSYour Name 
3463*5113495bSYour Name 	if (!pdev) {
3464*5113495bSYour Name 		ol_txrx_err("Unable to find pdev!");
3465*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
3466*5113495bSYour Name 	}
3467*5113495bSYour Name 
3468*5113495bSYour Name 	peer = ol_txrx_peer_get_ref_by_addr(pdev, peer_addr.bytes,
3469*5113495bSYour Name 					    PEER_DEBUG_ID_OL_INTERNAL);
3470*5113495bSYour Name 
3471*5113495bSYour Name 	/* Return success, if the peer is already cleared by
3472*5113495bSYour Name 	 * data path via peer detach function.
3473*5113495bSYour Name 	 */
3474*5113495bSYour Name 	if (!peer)
3475*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
3476*5113495bSYour Name 
3477*5113495bSYour Name 	ol_txrx_dbg("Clear peer rx frames: " QDF_MAC_ADDR_FMT,
3478*5113495bSYour Name 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3479*5113495bSYour Name 	ol_txrx_clear_peer_internal(peer);
3480*5113495bSYour Name 	status = ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
3481*5113495bSYour Name 
3482*5113495bSYour Name 	return status;
3483*5113495bSYour Name }
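
/*
 * Minimal sketch of the call into the function above (sta_mac and
 * soc_hdl are placeholders):
 *
 *	struct qdf_mac_addr mac;
 *
 *	qdf_mem_copy(mac.bytes, sta_mac, QDF_MAC_ADDR_SIZE);
 *	status = ol_txrx_clear_peer(soc_hdl, OL_TXRX_PDEV_ID, mac);
 *
 * A peer that is already gone counts as cleared, so the call returns
 * success rather than an error in that case.
 */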
3484*5113495bSYour Name 
3485*5113495bSYour Name /**
3486*5113495bSYour Name  * peer_unmap_timer_handler() - peer unmap timer function
3487*5113495bSYour Name  * @data: peer object pointer
3488*5113495bSYour Name  *
3489*5113495bSYour Name  * Return: none
3490*5113495bSYour Name  */
3491*5113495bSYour Name void peer_unmap_timer_handler(void *data)
3492*5113495bSYour Name {
3493*5113495bSYour Name 	ol_txrx_peer_handle peer = (ol_txrx_peer_handle)data;
3494*5113495bSYour Name 
3495*5113495bSYour Name 	if (!peer)
3496*5113495bSYour Name 		return;
3497*5113495bSYour Name 
3498*5113495bSYour Name 	ol_txrx_err("all unmap events not received for peer %pK, ref_cnt %d",
3499*5113495bSYour Name 		    peer, qdf_atomic_read(&peer->ref_cnt));
3500*5113495bSYour Name 	ol_txrx_err("peer %pK ("QDF_MAC_ADDR_FMT")",
3501*5113495bSYour Name 		    peer,
3502*5113495bSYour Name 		    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3503*5113495bSYour Name 	ol_register_peer_recovery_notifier(peer);
3504*5113495bSYour Name 
3505*5113495bSYour Name 	cds_trigger_recovery(QDF_PEER_UNMAP_TIMEDOUT);
3506*5113495bSYour Name }
3507*5113495bSYour Name 
3508*5113495bSYour Name 
3509*5113495bSYour Name /**
3510*5113495bSYour Name  * ol_txrx_peer_detach() - Delete a peer's data object.
3511*5113495bSYour Name  *
3512*5113495bSYour Name  * @soc_hdl: datapath soc handle
3513*5113495bSYour Name  * @vdev_id: virtual interface id
3514*5113495bSYour Name  * @peer_mac: peer MAC address
3515*5113495bSYour Name  * @bitmap: bitmap indicating special handling of request.
3516*5113495bSYour Name  * @peer_type: link or mld peer
3517*5113495bSYour Name  * When the host's control SW disassociates a peer, it calls
3518*5113495bSYour Name  * this function to detach and delete the peer. The reference
3519*5113495bSYour Name  * stored in the control peer object to the data peer
3520*5113495bSYour Name  * object (set up by a call to ol_peer_store()) is provided.
3521*5113495bSYour Name  *
3522*5113495bSYour Name  * Return: SUCCESS or Failure
3523*5113495bSYour Name  */
3524*5113495bSYour Name static QDF_STATUS ol_txrx_peer_detach(struct cdp_soc_t *soc_hdl,
3525*5113495bSYour Name 				      uint8_t vdev_id, uint8_t *peer_mac,
3526*5113495bSYour Name 				      uint32_t bitmap,
3527*5113495bSYour Name 				      enum cdp_peer_type peer_type)
3528*5113495bSYour Name {
3529*5113495bSYour Name 	ol_txrx_peer_handle peer;
3530*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3531*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
3532*5113495bSYour Name 								     vdev_id);
3533*5113495bSYour Name 
3534*5113495bSYour Name 	if (!vdev)
3535*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
3536*5113495bSYour Name 
3537*5113495bSYour Name 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)vdev->pdev,
3538*5113495bSYour Name 					 peer_mac);
3539*5113495bSYour Name 	if (!peer)
3540*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
3541*5113495bSYour Name 
3542*5113495bSYour Name 	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
3543*5113495bSYour Name 			  peer, qdf_atomic_read(&peer->ref_cnt));
3544*5113495bSYour Name 
3545*5113495bSYour Name 	/* redirect peer's rx delivery function to point to a discard func */
3546*5113495bSYour Name 	peer->rx_opt_proc = ol_rx_discard;
3547*5113495bSYour Name 
3548*5113495bSYour Name 	peer->valid = 0;
3549*5113495bSYour Name 
3550*5113495bSYour Name 	/* flush all rx packets before clearing up the peer local_id */
3551*5113495bSYour Name 	ol_txrx_clear_peer_internal(peer);
3552*5113495bSYour Name 
3553*5113495bSYour Name 	/* debug print to dump rx reorder state */
3554*5113495bSYour Name 	/* htt_rx_reorder_log_print(vdev->pdev->htt_pdev); */
3555*5113495bSYour Name 
3556*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3557*5113495bSYour Name 		   "%s:peer %pK ("QDF_MAC_ADDR_FMT")",
3558*5113495bSYour Name 		   __func__, peer,
3559*5113495bSYour Name 		   QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3560*5113495bSYour Name 
3561*5113495bSYour Name 	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
3562*5113495bSYour Name 	if (vdev->last_real_peer == peer)
3563*5113495bSYour Name 		vdev->last_real_peer = NULL;
3564*5113495bSYour Name 	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
3565*5113495bSYour Name 	htt_rx_reorder_log_print(peer->vdev->pdev->htt_pdev);
3566*5113495bSYour Name 
3567*5113495bSYour Name 	/*
3568*5113495bSYour Name 	 * set delete_in_progress to identify that wma
3569*5113495bSYour Name 	 * is waiting for the unmap message for this peer
3570*5113495bSYour Name 	 */
3571*5113495bSYour Name 	qdf_atomic_set(&peer->delete_in_progress, 1);
3572*5113495bSYour Name 
3573*5113495bSYour Name 	if (!(bitmap & (1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER))) {
3574*5113495bSYour Name 		if (vdev->opmode == wlan_op_mode_sta) {
3575*5113495bSYour Name 			qdf_mem_copy(&peer->vdev->last_peer_mac_addr,
3576*5113495bSYour Name 				&peer->mac_addr,
3577*5113495bSYour Name 				sizeof(union ol_txrx_align_mac_addr_t));
3578*5113495bSYour Name 
3579*5113495bSYour Name 			/*
3580*5113495bSYour Name 			 * Start a timer to track unmap events when the
3581*5113495bSYour Name 			 * sta peer gets deleted.
3582*5113495bSYour Name 			 */
3583*5113495bSYour Name 			qdf_timer_start(&peer->peer_unmap_timer,
3584*5113495bSYour Name 					OL_TXRX_PEER_UNMAP_TIMEOUT);
3585*5113495bSYour Name 			ol_txrx_info_high
3586*5113495bSYour Name 				("started peer_unmap_timer for peer %pK",
3587*5113495bSYour Name 				  peer);
3588*5113495bSYour Name 		}
3589*5113495bSYour Name 	}
3590*5113495bSYour Name 
3591*5113495bSYour Name 	/*
3592*5113495bSYour Name 	 * Remove the reference added during peer_attach.
3593*5113495bSYour Name 	 * The peer will still be left allocated until the
3594*5113495bSYour Name 	 * PEER_UNMAP message arrives to remove the other
3595*5113495bSYour Name 	 * reference, added by the PEER_MAP message.
3596*5113495bSYour Name 	 */
3597*5113495bSYour Name 	peer->state = OL_TXRX_PEER_STATE_INVALID;
3598*5113495bSYour Name 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_ATTACH);
3599*5113495bSYour Name 
3600*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
3601*5113495bSYour Name }
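
/*
 * Bitmap sketch: each bit requests special handling. For example, a
 * caller that must suppress the unmap timer (hypothetical values):
 *
 *	uint32_t bitmap = 1 << CDP_PEER_DO_NOT_START_UNMAP_TIMER;
 *
 *	ol_txrx_peer_detach(soc_hdl, vdev_id, peer_mac, bitmap,
 *			    CDP_LINK_PEER_TYPE);
 *
 * With that bit clear, an STA vdev arms peer_unmap_timer, and
 * peer_unmap_timer_handler() triggers recovery if the firmware's
 * UNMAP event never arrives.
 */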
3602*5113495bSYour Name 
3603*5113495bSYour Name /**
3604*5113495bSYour Name  * ol_txrx_peer_detach_force_delete() - Detach and delete a peer's data object
3605*5113495bSYour Name  * @soc_hdl: datapath soc handle
3606*5113495bSYour Name  * @vdev_id: virtual interface id
3607*5113495bSYour Name  * @peer_mac: peer mac address
3608*5113495bSYour Name  *
3609*5113495bSYour Name  * Detach a peer and force peer object to be removed. It is called during
3610*5113495bSYour Name  * roaming scenario when the firmware has already deleted a peer.
3611*5113495bSYour Name  * Remove it from the peer_id_to_object map. Peer object is actually freed
3612*5113495bSYour Name  * when last reference is deleted.
3613*5113495bSYour Name  *
3614*5113495bSYour Name  * Return: None
3615*5113495bSYour Name  */
3616*5113495bSYour Name static void ol_txrx_peer_detach_force_delete(struct cdp_soc_t *soc_hdl,
3617*5113495bSYour Name 					     uint8_t vdev_id, uint8_t *peer_mac)
3618*5113495bSYour Name {
3619*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
3620*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev;
3621*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3622*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
3623*5113495bSYour Name 								     vdev_id);
3624*5113495bSYour Name 
3625*5113495bSYour Name 	if (!vdev || !vdev->pdev)
3626*5113495bSYour Name 		return;
3627*5113495bSYour Name 
3628*5113495bSYour Name 	pdev = vdev->pdev;
3629*5113495bSYour Name 	peer = ol_txrx_find_peer_by_addr(ol_txrx_pdev_t_to_cdp_pdev(pdev),
3630*5113495bSYour Name 					 peer_mac);
3631*5113495bSYour Name 	if (!peer)
3632*5113495bSYour Name 		return;
3633*5113495bSYour Name 
3634*5113495bSYour Name 	/* Clear the peer_id_to_obj map entries */
3635*5113495bSYour Name 	ol_txrx_peer_remove_obj_map_entries(pdev, peer);
3636*5113495bSYour Name 	ol_txrx_peer_detach(soc_hdl, vdev_id, peer_mac,
3637*5113495bSYour Name 			    1 << CDP_PEER_DELETE_NO_SPECIAL,
3638*5113495bSYour Name 			    CDP_LINK_PEER_TYPE);
3639*5113495bSYour Name }
3640*5113495bSYour Name 
3641*5113495bSYour Name /**
3642*5113495bSYour Name  * ol_txrx_peer_detach_sync() - peer detach sync callback
3643*5113495bSYour Name  * @soc_hdl: datapath soc handle
3644*5113495bSYour Name  * @vdev_id: virtual interface id
3645*5113495bSYour Name  * @peer_mac: peer mac address
3646*5113495bSYour Name  * @peer_unmap_sync: peer unmap sync cb.
3647*5113495bSYour Name  * @bitmap: bitmap indicating special handling of request.
3648*5113495bSYour Name  *
3649*5113495bSYour Name  * Return: None
3650*5113495bSYour Name  */
3651*5113495bSYour Name static void ol_txrx_peer_detach_sync(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3652*5113495bSYour Name 				     uint8_t *peer_mac,
3653*5113495bSYour Name 				     ol_txrx_peer_unmap_sync_cb peer_unmap_sync,
3654*5113495bSYour Name 				     uint32_t bitmap)
3655*5113495bSYour Name {
3656*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev;
3657*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3658*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
3659*5113495bSYour Name 								     vdev_id);
3660*5113495bSYour Name 
3661*5113495bSYour Name 	if (!vdev || !vdev->pdev)
3662*5113495bSYour Name 		return;
3663*5113495bSYour Name 
3664*5113495bSYour Name 	pdev = vdev->pdev;
3665*5113495bSYour Name 	if (!pdev->peer_unmap_sync_cb)
3666*5113495bSYour Name 		pdev->peer_unmap_sync_cb = peer_unmap_sync;
3667*5113495bSYour Name 
3668*5113495bSYour Name 	ol_txrx_peer_detach(soc_hdl, vdev_id, peer_mac, bitmap,
3669*5113495bSYour Name 			    CDP_LINK_PEER_TYPE);
3670*5113495bSYour Name }
3671*5113495bSYour Name 
3672*5113495bSYour Name /**
3673*5113495bSYour Name  * ol_txrx_peer_unmap_sync_cb_set() - set peer unmap sync callback
3674*5113495bSYour Name  * @soc_hdl: datapath soc handle
3675*5113495bSYour Name  * @pdev_id: physical device instance id
3676*5113495bSYour Name  * @peer_unmap_sync: peer unmap sync callback
3677*5113495bSYour Name  *
3678*5113495bSYour Name  * Return: None
3679*5113495bSYour Name  */
3680*5113495bSYour Name static void ol_txrx_peer_unmap_sync_cb_set(
3681*5113495bSYour Name 				struct cdp_soc_t *soc_hdl,
3682*5113495bSYour Name 				uint8_t pdev_id,
3683*5113495bSYour Name 				ol_txrx_peer_unmap_sync_cb peer_unmap_sync)
3684*5113495bSYour Name {
3685*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3686*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
3687*5113495bSYour Name 								    pdev_id);
3688*5113495bSYour Name 
3689*5113495bSYour Name 	if (!pdev)
3690*5113495bSYour Name 		return;
3691*5113495bSYour Name 
3692*5113495bSYour Name 	if (!pdev->peer_unmap_sync_cb)
3693*5113495bSYour Name 		pdev->peer_unmap_sync_cb = peer_unmap_sync;
3694*5113495bSYour Name }
3695*5113495bSYour Name 
3696*5113495bSYour Name /**
3697*5113495bSYour Name  * ol_txrx_peer_flush_frags() - Flush fragments for a particular peer
3698*5113495bSYour Name  * @soc_hdl: datapath soc handle
3699*5113495bSYour Name  * @vdev_id: virtual device id
3700*5113495bSYour Name  * @peer_mac: peer mac address
3701*5113495bSYour Name  *
3702*5113495bSYour Name  * Return: None
3703*5113495bSYour Name  */
3704*5113495bSYour Name static void
3705*5113495bSYour Name ol_txrx_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
3706*5113495bSYour Name 			 uint8_t *peer_mac)
3707*5113495bSYour Name {
3708*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
3709*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3710*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev =
3711*5113495bSYour Name 		ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
3712*5113495bSYour Name 
3713*5113495bSYour Name 	if (!pdev)
3714*5113495bSYour Name 		return;
3715*5113495bSYour Name 
3716*5113495bSYour Name 	peer =  ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
3717*5113495bSYour Name 						    PEER_DEBUG_ID_OL_INTERNAL);
3718*5113495bSYour Name 	if (!peer)
3719*5113495bSYour Name 		return;
3720*5113495bSYour Name 
3721*5113495bSYour Name 	ol_rx_reorder_peer_cleanup(peer->vdev, peer);
3722*5113495bSYour Name 
3723*5113495bSYour Name 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
3724*5113495bSYour Name }
3725*5113495bSYour Name 
3726*5113495bSYour Name /**
3727*5113495bSYour Name  * ol_txrx_dump_tx_desc() - dump tx desc total and free count
3728*5113495bSYour Name  * @pdev_handle: Pointer to txrx pdev
3729*5113495bSYour Name  *
3730*5113495bSYour Name  * Return: none
3731*5113495bSYour Name  */
3732*5113495bSYour Name static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
3733*5113495bSYour Name {
3734*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = (ol_txrx_pdev_handle) pdev_handle;
3735*5113495bSYour Name 	uint32_t total, num_free;
3736*5113495bSYour Name 
3737*5113495bSYour Name 	if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3738*5113495bSYour Name 		total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3739*5113495bSYour Name 	else
3740*5113495bSYour Name 		total = ol_tx_get_desc_global_pool_size(pdev);
3741*5113495bSYour Name 
3742*5113495bSYour Name 	num_free = ol_tx_get_total_free_desc(pdev);
3743*5113495bSYour Name 
3744*5113495bSYour Name 	ol_txrx_info_high("total tx credit %d num_free %d",
3745*5113495bSYour Name 			  total, num_free);
3748*5113495bSYour Name }
3749*5113495bSYour Name 
3750*5113495bSYour Name /**
3751*5113495bSYour Name  * ol_txrx_wait_for_pending_tx() - wait for tx queue to be empty
3752*5113495bSYour Name  * @timeout: timeout in ms
3753*5113495bSYour Name  *
3754*5113495bSYour Name  * Wait for tx queue to be empty, return timeout error if
3755*5113495bSYour Name  * queue doesn't empty before timeout occurs.
3756*5113495bSYour Name  *
3757*5113495bSYour Name  * Return:
3758*5113495bSYour Name  *    QDF_STATUS_SUCCESS if the queue empties,
3759*5113495bSYour Name  *    QDF_STATUS_E_TIMEOUT in case of timeout,
3760*5113495bSYour Name  *    QDF_STATUS_E_FAULT in case of missing handle
3761*5113495bSYour Name  */
3762*5113495bSYour Name static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
3763*5113495bSYour Name {
3764*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
3765*5113495bSYour Name 	struct ol_txrx_pdev_t *txrx_pdev;
3766*5113495bSYour Name 
3767*5113495bSYour Name 	if (qdf_unlikely(!soc))
3768*5113495bSYour Name 		return QDF_STATUS_E_FAULT;
3769*5113495bSYour Name 
3770*5113495bSYour Name 	txrx_pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
3771*5113495bSYour Name 	if (!txrx_pdev) {
3772*5113495bSYour Name 		ol_txrx_err("txrx context is null");
3773*5113495bSYour Name 		return QDF_STATUS_E_FAULT;
3774*5113495bSYour Name 	}
3775*5113495bSYour Name 
3776*5113495bSYour Name 	while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
3777*5113495bSYour Name 		qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
3778*5113495bSYour Name 		if (timeout <= 0) {
3779*5113495bSYour Name 			ol_txrx_err("tx frames are pending");
3780*5113495bSYour Name 			ol_txrx_dump_tx_desc(txrx_pdev);
3781*5113495bSYour Name 			return QDF_STATUS_E_TIMEOUT;
3782*5113495bSYour Name 		}
3783*5113495bSYour Name 		timeout = timeout - OL_ATH_TX_DRAIN_WAIT_DELAY;
3784*5113495bSYour Name 	}
3785*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
3786*5113495bSYour Name }
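
/*
 * Drain-wait sketch: callers poll in OL_ATH_TX_DRAIN_WAIT_DELAY steps
 * until the pending count reaches zero or the budget expires
 * (hypothetical caller):
 *
 *	if (ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT) !=
 *	    QDF_STATUS_SUCCESS) {
 *		... frames are still in flight; abort the suspend ...
 *	}
 */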
3787*5113495bSYour Name 
3788*5113495bSYour Name #ifndef QCA_WIFI_3_0_EMU
3789*5113495bSYour Name #define SUSPEND_DRAIN_WAIT 500
3790*5113495bSYour Name #else
3791*5113495bSYour Name #define SUSPEND_DRAIN_WAIT 3000
3792*5113495bSYour Name #endif
3793*5113495bSYour Name 
3794*5113495bSYour Name #ifdef FEATURE_RUNTIME_PM
3795*5113495bSYour Name /**
3796*5113495bSYour Name  * ol_txrx_runtime_suspend() - ensure TXRX is ready to runtime suspend
3797*5113495bSYour Name  * @soc_hdl: Datapath soc handle
3798*5113495bSYour Name  * @pdev_id: id of data path pdev handle
3799*5113495bSYour Name  *
3800*5113495bSYour Name  * TXRX is ready to runtime suspend if there are no pending packets
3801*5113495bSYour Name  * in the tx queue.
3802*5113495bSYour Name  *
3803*5113495bSYour Name  * Return: QDF_STATUS
3804*5113495bSYour Name  */
3805*5113495bSYour Name static QDF_STATUS ol_txrx_runtime_suspend(struct cdp_soc_t *soc_hdl,
3806*5113495bSYour Name 					  uint8_t pdev_id)
3807*5113495bSYour Name {
3808*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
3809*5113495bSYour Name 	struct cdp_pdev *txrx_pdev = (struct cdp_pdev *)
3810*5113495bSYour Name 				ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
3811*5113495bSYour Name 
3812*5113495bSYour Name 	if (ol_txrx_get_tx_pending(txrx_pdev))
3813*5113495bSYour Name 		return QDF_STATUS_E_BUSY;
3814*5113495bSYour Name 	else
3815*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
3816*5113495bSYour Name }
3817*5113495bSYour Name 
3818*5113495bSYour Name /**
3819*5113495bSYour Name  * ol_txrx_runtime_resume() - ensure TXRX is ready to runtime resume
3820*5113495bSYour Name  * @soc_hdl: Datapath soc handle
3821*5113495bSYour Name  * @pdev_id: id of data path pdev handle
3822*5113495bSYour Name  *
3823*5113495bSYour Name  * This is a dummy function for symmetry.
3824*5113495bSYour Name  *
3825*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
3826*5113495bSYour Name  */
3827*5113495bSYour Name static QDF_STATUS ol_txrx_runtime_resume(struct cdp_soc_t *soc_hdl,
3828*5113495bSYour Name 					 uint8_t pdev_id)
3829*5113495bSYour Name {
3830*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
3831*5113495bSYour Name }
3832*5113495bSYour Name #endif
3833*5113495bSYour Name 
3834*5113495bSYour Name /**
3835*5113495bSYour Name  * ol_txrx_bus_suspend() - bus suspend
3836*5113495bSYour Name  * @soc_hdl: Datapath soc handle
3837*5113495bSYour Name  * @pdev_id: id of data path pdev handle
3838*5113495bSYour Name  *
3839*5113495bSYour Name  * Ensure that ol_txrx is ready for bus suspend
3840*5113495bSYour Name  *
3841*5113495bSYour Name  * Return: QDF_STATUS
3842*5113495bSYour Name  */
3843*5113495bSYour Name static QDF_STATUS ol_txrx_bus_suspend(struct cdp_soc_t *soc_hdl,
3844*5113495bSYour Name 				      uint8_t pdev_id)
3845*5113495bSYour Name {
3846*5113495bSYour Name 	return ol_txrx_wait_for_pending_tx(SUSPEND_DRAIN_WAIT);
3847*5113495bSYour Name }
3848*5113495bSYour Name 
3849*5113495bSYour Name /**
3850*5113495bSYour Name  * ol_txrx_bus_resume() - bus resume
3851*5113495bSYour Name  * @soc_hdl: Datapath soc handle
3852*5113495bSYour Name  * @pdev_id: id of data path pdev handle
3853*5113495bSYour Name  *
3854*5113495bSYour Name  * Dummy function for symmetry
3855*5113495bSYour Name  *
3856*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
3857*5113495bSYour Name  */
3858*5113495bSYour Name static QDF_STATUS ol_txrx_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
3859*5113495bSYour Name {
3860*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
3861*5113495bSYour Name }
3862*5113495bSYour Name 
3863*5113495bSYour Name /**
3864*5113495bSYour Name  * ol_txrx_get_tx_pending - Get the number of pending transmit
3865*5113495bSYour Name  * frames that are awaiting completion.
3866*5113495bSYour Name  *
3867*5113495bSYour Name  * @ppdev: the data physical device object
3868*5113495bSYour Name  *  Mainly used in the cleanup path to make sure all buffers have been freed
3869*5113495bSYour Name  *
3870*5113495bSYour Name  * Return: count of pending frames
3871*5113495bSYour Name  */
3872*5113495bSYour Name uint32_t ol_txrx_get_tx_pending(struct cdp_pdev *ppdev)
3873*5113495bSYour Name {
3874*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
3875*5113495bSYour Name 	uint32_t total;
3876*5113495bSYour Name 
3877*5113495bSYour Name 	if (ol_cfg_is_high_latency(pdev->ctrl_pdev))
3878*5113495bSYour Name 		total = qdf_atomic_read(&pdev->orig_target_tx_credit);
3879*5113495bSYour Name 	else
3880*5113495bSYour Name 		total = ol_tx_get_desc_global_pool_size(pdev);
3881*5113495bSYour Name 
3882*5113495bSYour Name 	return total - ol_tx_get_total_free_desc(pdev);
3883*5113495bSYour Name }
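
/*
 * The pending count is derived rather than tracked directly:
 *
 *	pending = total_descriptors - free_descriptors
 *
 * e.g. a 1024-entry pool reporting 1000 free descriptors implies 24
 * frames still awaiting completion (illustrative numbers only).
 */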
3884*5113495bSYour Name 
3885*5113495bSYour Name void ol_txrx_discard_tx_pending(ol_txrx_pdev_handle pdev_handle)
3886*5113495bSYour Name {
3887*5113495bSYour Name 	ol_tx_desc_list tx_descs;
3888*5113495bSYour Name 	/*
3889*5113495bSYour Name 	 * First let hif do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
3890*5113495bSYour Name 	 * then let htt do the qdf_atomic_dec_and_test(&tx_desc->ref_cnt)
3891*5113495bSYour Name 	 * which matches the normal data send completion path
3892*5113495bSYour Name 	 */
3893*5113495bSYour Name 	htt_tx_pending_discard(pdev_handle->htt_pdev);
3894*5113495bSYour Name 
3895*5113495bSYour Name 	TAILQ_INIT(&tx_descs);
3896*5113495bSYour Name 	ol_tx_queue_discard(pdev_handle, true, &tx_descs);
3897*5113495bSYour Name 	/* Discard Frames in Discard List */
3898*5113495bSYour Name 	ol_tx_desc_frame_list_free(pdev_handle, &tx_descs, 1 /* error */);
3899*5113495bSYour Name 
3900*5113495bSYour Name 	ol_tx_discard_target_frms(pdev_handle);
3901*5113495bSYour Name }
3902*5113495bSYour Name 
3903*5113495bSYour Name static inline
3904*5113495bSYour Name uint64_t ol_txrx_stats_ptr_to_u64(struct ol_txrx_stats_req_internal *req)
3905*5113495bSYour Name {
3906*5113495bSYour Name 	return (uint64_t) ((size_t) req);
3907*5113495bSYour Name }
3908*5113495bSYour Name 
3909*5113495bSYour Name static inline
3910*5113495bSYour Name struct ol_txrx_stats_req_internal *ol_txrx_u64_to_stats_ptr(uint64_t cookie)
3911*5113495bSYour Name {
3912*5113495bSYour Name 	return (struct ol_txrx_stats_req_internal *)((size_t) cookie);
3913*5113495bSYour Name }
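
/*
 * Cookie round-trip sketch: a request pointer crosses the firmware
 * interface as a 64-bit cookie and is recovered on the response path:
 *
 *	uint64_t cookie = ol_txrx_stats_ptr_to_u64(req);
 *	...
 *	req = ol_txrx_u64_to_stats_ptr(cookie);
 *
 * The intermediate size_t cast keeps the conversion well-defined on
 * both 32-bit and 64-bit builds.
 */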
3914*5113495bSYour Name 
3915*5113495bSYour Name #ifdef currently_unused
3916*5113495bSYour Name void
3917*5113495bSYour Name ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
3918*5113495bSYour Name 		     uint8_t cfg_stats_type, uint32_t cfg_val)
3919*5113495bSYour Name {
3920*5113495bSYour Name 	uint8_t dummy_cookie = 0;
3921*5113495bSYour Name 
3922*5113495bSYour Name 	htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
3923*5113495bSYour Name 			      0 /* reset mask */,
3924*5113495bSYour Name 			      cfg_stats_type, cfg_val, dummy_cookie);
3925*5113495bSYour Name }
3926*5113495bSYour Name #endif
3927*5113495bSYour Name 
3928*5113495bSYour Name /**
3929*5113495bSYour Name  * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
3930*5113495bSYour Name  * @pdev: handle to ol txrx pdev
3931*5113495bSYour Name  * @pool_size: Size of fw stats descriptor pool
3932*5113495bSYour Name  *
3933*5113495bSYour Name  * Return: 0 for success, error code on failure.
3934*5113495bSYour Name  */
3935*5113495bSYour Name int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
3936*5113495bSYour Name 				    uint8_t pool_size)
3937*5113495bSYour Name {
3938*5113495bSYour Name 	int i;
3939*5113495bSYour Name 
3940*5113495bSYour Name 	if (!pdev) {
3941*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
3942*5113495bSYour Name 		return -EINVAL;
3943*5113495bSYour Name 	}
3944*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
3945*5113495bSYour Name 		sizeof(struct ol_txrx_fw_stats_desc_elem_t));
3946*5113495bSYour Name 	if (!pdev->ol_txrx_fw_stats_desc_pool.pool)
3947*5113495bSYour Name 		return -ENOMEM;
3948*5113495bSYour Name 
3949*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.freelist =
3950*5113495bSYour Name 		&pdev->ol_txrx_fw_stats_desc_pool.pool[0];
3951*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
3952*5113495bSYour Name 
3953*5113495bSYour Name 	for (i = 0; i < (pool_size - 1); i++) {
3954*5113495bSYour Name 		pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3955*5113495bSYour Name 		pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3956*5113495bSYour Name 		pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
3957*5113495bSYour Name 			&pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
3958*5113495bSYour Name 	}
3959*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
3960*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
3961*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
3962*5113495bSYour Name 	qdf_spinlock_create(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3963*5113495bSYour Name 	qdf_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
3964*5113495bSYour Name 	qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
3965*5113495bSYour Name 	return 0;
3966*5113495bSYour Name }
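
/*
 * The pool is a single allocation threaded into a singly linked
 * freelist, and desc_id doubles as the array index, so a descriptor
 * can be recovered from the 8-bit cookie without a search. Layout
 * right after init:
 *
 *	freelist -> pool[0] -> pool[1] -> ... -> pool[pool_size - 1] -> NULL
 */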
3967*5113495bSYour Name 
3968*5113495bSYour Name /**
3969*5113495bSYour Name  * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
3970*5113495bSYour Name  * fw stats descriptor pool
3971*5113495bSYour Name  * @pdev: handle to ol txrx pdev
3972*5113495bSYour Name  *
3973*5113495bSYour Name  * Return: None
3974*5113495bSYour Name  */
3975*5113495bSYour Name void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
3976*5113495bSYour Name {
3977*5113495bSYour Name 	if (!pdev) {
3978*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
3979*5113495bSYour Name 		return;
3980*5113495bSYour Name 	}
3981*5113495bSYour Name 	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
3982*5113495bSYour Name 		ol_txrx_err("Pool is not initialized");
3983*5113495bSYour Name 		return;
3984*5113495bSYour Name 	}
3985*5113495bSYour Name 	if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
3986*5113495bSYour Name 		ol_txrx_err("Pool is not allocated");
3987*5113495bSYour Name 		return;
3988*5113495bSYour Name 	}
3989*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3990*5113495bSYour Name 	qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
3991*5113495bSYour Name 	qdf_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
3992*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
3993*5113495bSYour Name 
3994*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
3995*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
3996*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
3997*5113495bSYour Name }
3998*5113495bSYour Name 
3999*5113495bSYour Name /**
4000*5113495bSYour Name  * ol_txrx_fw_stats_desc_alloc() - Get fw stats descriptor from fw stats
4001*5113495bSYour Name  * free descriptor pool
4002*5113495bSYour Name  * @pdev: handle to ol txrx pdev
4003*5113495bSYour Name  *
4004*5113495bSYour Name  * Return: pointer to fw stats descriptor, NULL on failure
4005*5113495bSYour Name  */
4006*5113495bSYour Name struct ol_txrx_fw_stats_desc_t
4007*5113495bSYour Name 	*ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
4008*5113495bSYour Name {
4009*5113495bSYour Name 	struct ol_txrx_fw_stats_desc_t *desc = NULL;
4010*5113495bSYour Name 
4011*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
4012*5113495bSYour Name 	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
4013*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->
4014*5113495bSYour Name 				   ol_txrx_fw_stats_desc_pool.pool_lock);
4015*5113495bSYour Name 		ol_txrx_err("Pool deinitialized");
4016*5113495bSYour Name 		return NULL;
4017*5113495bSYour Name 	}
4018*5113495bSYour Name 	if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
4019*5113495bSYour Name 		desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
4020*5113495bSYour Name 		pdev->ol_txrx_fw_stats_desc_pool.freelist =
4021*5113495bSYour Name 			pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
4022*5113495bSYour Name 	}
4023*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
4024*5113495bSYour Name 
4025*5113495bSYour Name 	if (desc)
4026*5113495bSYour Name 		ol_txrx_dbg("desc_id %d allocated", desc->desc_id);
4027*5113495bSYour Name 	else
4028*5113495bSYour Name 		ol_txrx_err("fw stats descriptors are exhausted");
4029*5113495bSYour Name 
4030*5113495bSYour Name 	return desc;
4031*5113495bSYour Name }
4032*5113495bSYour Name 
4033*5113495bSYour Name /**
4034*5113495bSYour Name  * ol_txrx_fw_stats_desc_get_req() - Get the request and put the fw stats
4035*5113495bSYour Name  * descriptor back into the free pool
4036*5113495bSYour Name  * @pdev: handle to ol txrx pdev
4037*5113495bSYour Name  * @desc_id: descriptor ID of the fw stats descriptor being released
4038*5113495bSYour Name  *
4039*5113495bSYour Name  * Return: pointer to request
4040*5113495bSYour Name  */
4041*5113495bSYour Name struct ol_txrx_stats_req_internal
4042*5113495bSYour Name 	*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
4043*5113495bSYour Name 				       unsigned char desc_id)
4044*5113495bSYour Name {
4045*5113495bSYour Name 	struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
4046*5113495bSYour Name 	struct ol_txrx_stats_req_internal *req;
4047*5113495bSYour Name 
4048*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
4049*5113495bSYour Name 	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
4050*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->
4051*5113495bSYour Name 				   ol_txrx_fw_stats_desc_pool.pool_lock);
4052*5113495bSYour Name 		ol_txrx_err("Desc ID %u Pool deinitialized", desc_id);
4053*5113495bSYour Name 		return NULL;
4054*5113495bSYour Name 	}
4055*5113495bSYour Name 	desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
4056*5113495bSYour Name 	req = desc_elem->desc.req;
4057*5113495bSYour Name 	desc_elem->desc.req = NULL;
4058*5113495bSYour Name 	desc_elem->next =
4059*5113495bSYour Name 		pdev->ol_txrx_fw_stats_desc_pool.freelist;
4060*5113495bSYour Name 	pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
4061*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
4062*5113495bSYour Name 	return req;
4063*5113495bSYour Name }
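
/*
 * Descriptor lifecycle sketch: the request path takes a descriptor off
 * the freelist and publishes its desc_id as the HTT cookie; the
 * response path uses that cookie to recover the request and return the
 * descriptor to the freelist:
 *
 *	desc = ol_txrx_fw_stats_desc_alloc(pdev);
 *	desc->req = non_volatile_req;
 *	... desc->desc_id travels to firmware and back as the cookie ...
 *	req = ol_txrx_fw_stats_desc_get_req(pdev, cookie);
 */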
4064*5113495bSYour Name 
4065*5113495bSYour Name /**
4066*5113495bSYour Name  * ol_txrx_fw_stats_get() - Get fw stats
4067*5113495bSYour Name  *
4068*5113495bSYour Name  * @soc_hdl: datapath soc handle
4069*5113495bSYour Name  * @vdev_id: virtual interface id
4070*5113495bSYour Name  * @req: specifications of stats request
4071*5113495bSYour Name  * @per_vdev: bool input whether stats requested per vdev or not
4072*5113495bSYour Name  * @response_expected: bool input whether expecting response or not
4073*5113495bSYour Name  *
4074*5113495bSYour Name  * Return: success or failure
4075*5113495bSYour Name  */
4076*5113495bSYour Name static A_STATUS
4077*5113495bSYour Name ol_txrx_fw_stats_get(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4078*5113495bSYour Name 		     struct ol_txrx_stats_req *req, bool per_vdev,
4079*5113495bSYour Name 		     bool response_expected)
4080*5113495bSYour Name {
4081*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
4082*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
4083*5113495bSYour Name 								     vdev_id);
4084*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev;
4085*5113495bSYour Name 	uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
4086*5113495bSYour Name 	struct ol_txrx_stats_req_internal *non_volatile_req;
4087*5113495bSYour Name 	struct ol_txrx_fw_stats_desc_t *desc = NULL;
4088*5113495bSYour Name 	struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
4089*5113495bSYour Name 
4090*5113495bSYour Name 	if (!vdev)
4091*5113495bSYour Name 		return A_EINVAL;
4092*5113495bSYour Name 
4093*5113495bSYour Name 	pdev = vdev->pdev;
4094*5113495bSYour Name 	if (!pdev ||
4095*5113495bSYour Name 	    req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
4096*5113495bSYour Name 	    req->stats_type_reset_mask >= 1 << HTT_DBG_NUM_STATS) {
4097*5113495bSYour Name 		return A_EINVAL;
4098*5113495bSYour Name 	}
4099*5113495bSYour Name 
4100*5113495bSYour Name 	/*
4101*5113495bSYour Name 	 * Allocate a non-transient stats request object.
4102*5113495bSYour Name 	 * (The one provided as an argument is likely allocated on the stack.)
4103*5113495bSYour Name 	 */
4104*5113495bSYour Name 	non_volatile_req = qdf_mem_malloc(sizeof(*non_volatile_req));
4105*5113495bSYour Name 	if (!non_volatile_req)
4106*5113495bSYour Name 		return A_NO_MEMORY;
4107*5113495bSYour Name 
4108*5113495bSYour Name 	/* copy the caller's specifications */
4109*5113495bSYour Name 	non_volatile_req->base = *req;
4110*5113495bSYour Name 	non_volatile_req->serviced = 0;
4111*5113495bSYour Name 	non_volatile_req->offset = 0;
4112*5113495bSYour Name 	if (response_expected) {
4113*5113495bSYour Name 		desc = ol_txrx_fw_stats_desc_alloc(pdev);
4114*5113495bSYour Name 		if (!desc) {
4115*5113495bSYour Name 			qdf_mem_free(non_volatile_req);
4116*5113495bSYour Name 			return A_NO_MEMORY;
4117*5113495bSYour Name 		}
4118*5113495bSYour Name 
4119*5113495bSYour Name 		/* use the desc id as the cookie */
4120*5113495bSYour Name 		cookie = desc->desc_id;
4121*5113495bSYour Name 		desc->req = non_volatile_req;
4122*5113495bSYour Name 		qdf_spin_lock_bh(&pdev->req_list_spinlock);
4123*5113495bSYour Name 		TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
4124*5113495bSYour Name 		pdev->req_list_depth++;
4125*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4126*5113495bSYour Name 	}
4127*5113495bSYour Name 
4128*5113495bSYour Name 	if (htt_h2t_dbg_stats_get(pdev->htt_pdev,
4129*5113495bSYour Name 				  req->stats_type_upload_mask,
4130*5113495bSYour Name 				  req->stats_type_reset_mask,
4131*5113495bSYour Name 				  HTT_H2T_STATS_REQ_CFG_STAT_TYPE_INVALID, 0,
4132*5113495bSYour Name 				  cookie)) {
4133*5113495bSYour Name 		if (response_expected) {
4134*5113495bSYour Name 			qdf_spin_lock_bh(&pdev->req_list_spinlock);
4135*5113495bSYour Name 			TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
4136*5113495bSYour Name 				     req_list_elem);
4137*5113495bSYour Name 			pdev->req_list_depth--;
4138*5113495bSYour Name 			qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4139*5113495bSYour Name 			if (desc) {
4140*5113495bSYour Name 				qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.
4141*5113495bSYour Name 						 pool_lock);
4142*5113495bSYour Name 				desc->req = NULL;
4143*5113495bSYour Name 				elem = container_of(desc,
4144*5113495bSYour Name 						    struct ol_txrx_fw_stats_desc_elem_t,
4145*5113495bSYour Name 						    desc);
4146*5113495bSYour Name 				elem->next =
4147*5113495bSYour Name 					pdev->ol_txrx_fw_stats_desc_pool.freelist;
4148*5113495bSYour Name 				pdev->ol_txrx_fw_stats_desc_pool.freelist = elem;
4149*5113495bSYour Name 				qdf_spin_unlock_bh(&pdev->
4150*5113495bSYour Name 						   ol_txrx_fw_stats_desc_pool.
4151*5113495bSYour Name 						   pool_lock);
4152*5113495bSYour Name 			}
4153*5113495bSYour Name 		}
4154*5113495bSYour Name 
4155*5113495bSYour Name 		qdf_mem_free(non_volatile_req);
4156*5113495bSYour Name 		return A_ERROR;
4157*5113495bSYour Name 	}
4158*5113495bSYour Name 
4159*5113495bSYour Name 	if (!response_expected)
4160*5113495bSYour Name 		qdf_mem_free(non_volatile_req);
4161*5113495bSYour Name 
4162*5113495bSYour Name 	return A_OK;
4163*5113495bSYour Name }
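
/*
 * Request sketch (hypothetical caller; field values are illustrative):
 *
 *	struct ol_txrx_stats_req req = {0};
 *
 *	req.stats_type_upload_mask = 1 << HTT_DBG_STATS_WAL_PDEV_TXRX;
 *	req.print.verbose = 1;
 *	if (ol_txrx_fw_stats_get(soc_hdl, vdev_id, &req, false, true) !=
 *	    A_OK) {
 *		... request was not queued; no response will arrive ...
 *	}
 *
 * With response_expected true, the request stays on pdev->req_list
 * until ol_txrx_fw_stats_handler() consumes the matching cookie.
 */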
4164*5113495bSYour Name 
4165*5113495bSYour Name void
4166*5113495bSYour Name ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
4167*5113495bSYour Name 			 uint8_t cookie, uint8_t *stats_info_list)
4168*5113495bSYour Name {
4169*5113495bSYour Name 	enum htt_dbg_stats_type type;
4170*5113495bSYour Name 	enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
4171*5113495bSYour Name 	enum htt_dbg_stats_status status;
4172*5113495bSYour Name 	int length;
4173*5113495bSYour Name 	uint8_t *stats_data;
4174*5113495bSYour Name 	struct ol_txrx_stats_req_internal *req, *tmp;
4175*5113495bSYour Name 	int more = 0;
4176*5113495bSYour Name 	int found = 0;
4177*5113495bSYour Name 
4178*5113495bSYour Name 	if (cookie >= FW_STATS_DESC_POOL_SIZE) {
4179*5113495bSYour Name 		ol_txrx_err("Cookie is not valid");
4180*5113495bSYour Name 		return;
4181*5113495bSYour Name 	}
4182*5113495bSYour Name 	req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
4183*5113495bSYour Name 	if (!req) {
4184*5113495bSYour Name 		ol_txrx_err("Request not retrieved for cookie %u",
4185*5113495bSYour Name 			    (uint8_t)cookie);
4186*5113495bSYour Name 		return;
4187*5113495bSYour Name 	}
4188*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->req_list_spinlock);
4189*5113495bSYour Name 	TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4190*5113495bSYour Name 		if (req == tmp) {
4191*5113495bSYour Name 			found = 1;
4192*5113495bSYour Name 			break;
4193*5113495bSYour Name 		}
4194*5113495bSYour Name 	}
4195*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4196*5113495bSYour Name 
4197*5113495bSYour Name 	if (!found) {
4198*5113495bSYour Name 		ol_txrx_err(
4199*5113495bSYour Name 			"req(%pK) from firmware can't be found in the list", req);
4200*5113495bSYour Name 		return;
4201*5113495bSYour Name 	}
4202*5113495bSYour Name 
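	/*
	 * The T2H stats payload is a series of records, each preceded by a
	 * header giving its type, status, and length.  Walk the series until
	 * SERIES_DONE; a PARTIAL status means the firmware will send further
	 * indications for this request, so the request must stay queued.
	 */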
4203*5113495bSYour Name 	do {
4204*5113495bSYour Name 		htt_t2h_dbg_stats_hdr_parse(stats_info_list, &type, &status,
4205*5113495bSYour Name 					    &length, &stats_data);
4206*5113495bSYour Name 		if (status == HTT_DBG_STATS_STATUS_SERIES_DONE)
4207*5113495bSYour Name 			break;
4208*5113495bSYour Name 		if (status == HTT_DBG_STATS_STATUS_PRESENT ||
4209*5113495bSYour Name 		    status == HTT_DBG_STATS_STATUS_PARTIAL) {
4210*5113495bSYour Name 			uint8_t *buf;
4211*5113495bSYour Name 			int bytes = 0;
4212*5113495bSYour Name 
4213*5113495bSYour Name 			if (status == HTT_DBG_STATS_STATUS_PARTIAL)
4214*5113495bSYour Name 				more = 1;
4215*5113495bSYour Name 			if (req->base.print.verbose || req->base.print.concise)
4216*5113495bSYour Name 				/* provide the header along with the data */
4217*5113495bSYour Name 				htt_t2h_stats_print(stats_info_list,
4218*5113495bSYour Name 						    req->base.print.concise);
4219*5113495bSYour Name 
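			/*
			 * Each copy-capable stats type below follows the same
			 * pattern: clamp the record size to the requestor's
			 * byte_limit, then copy the record into the
			 * requestor's buffer at the current offset.
			 */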
4220*5113495bSYour Name 			switch (type) {
4221*5113495bSYour Name 			case HTT_DBG_STATS_WAL_PDEV_TXRX:
4222*5113495bSYour Name 				bytes = sizeof(struct wlan_dbg_stats);
4223*5113495bSYour Name 				if (req->base.copy.buf) {
4224*5113495bSYour Name 					int lmt;
4225*5113495bSYour Name 
4226*5113495bSYour Name 					lmt = sizeof(struct wlan_dbg_stats);
4227*5113495bSYour Name 					if (req->base.copy.byte_limit < lmt)
4228*5113495bSYour Name 						lmt = req->base.copy.byte_limit;
4229*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4230*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, lmt);
4231*5113495bSYour Name 				}
4232*5113495bSYour Name 				break;
4233*5113495bSYour Name 			case HTT_DBG_STATS_RX_REORDER:
4234*5113495bSYour Name 				bytes = sizeof(struct rx_reorder_stats);
4235*5113495bSYour Name 				if (req->base.copy.buf) {
4236*5113495bSYour Name 					int lmt;
4237*5113495bSYour Name 
4238*5113495bSYour Name 					lmt = sizeof(struct rx_reorder_stats);
4239*5113495bSYour Name 					if (req->base.copy.byte_limit < lmt)
4240*5113495bSYour Name 						lmt = req->base.copy.byte_limit;
4241*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4242*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, lmt);
4243*5113495bSYour Name 				}
4244*5113495bSYour Name 				break;
4245*5113495bSYour Name 			case HTT_DBG_STATS_RX_RATE_INFO:
4246*5113495bSYour Name 				bytes = sizeof(wlan_dbg_rx_rate_info_t);
4247*5113495bSYour Name 				if (req->base.copy.buf) {
4248*5113495bSYour Name 					int lmt;
4249*5113495bSYour Name 
4250*5113495bSYour Name 					lmt = sizeof(wlan_dbg_rx_rate_info_t);
4251*5113495bSYour Name 					if (req->base.copy.byte_limit < lmt)
4252*5113495bSYour Name 						lmt = req->base.copy.byte_limit;
4253*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4254*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, lmt);
4255*5113495bSYour Name 				}
4256*5113495bSYour Name 				break;
4257*5113495bSYour Name 
4258*5113495bSYour Name 			case HTT_DBG_STATS_TX_RATE_INFO:
4259*5113495bSYour Name 				bytes = sizeof(wlan_dbg_tx_rate_info_t);
4260*5113495bSYour Name 				if (req->base.copy.buf) {
4261*5113495bSYour Name 					int lmt;
4262*5113495bSYour Name 
4263*5113495bSYour Name 					lmt = sizeof(wlan_dbg_tx_rate_info_t);
4264*5113495bSYour Name 					if (req->base.copy.byte_limit < lmt)
4265*5113495bSYour Name 						lmt = req->base.copy.byte_limit;
4266*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4267*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, lmt);
4268*5113495bSYour Name 				}
4269*5113495bSYour Name 				break;
4270*5113495bSYour Name 
4271*5113495bSYour Name 			case HTT_DBG_STATS_TX_PPDU_LOG:
4272*5113495bSYour Name 				bytes = 0;
4273*5113495bSYour Name 				/* TO DO: specify how many bytes are present */
4274*5113495bSYour Name 				/* TO DO: add copying to the requestor's buf */
4275*5113495bSYour Name 				fallthrough;
4276*5113495bSYour Name 			case HTT_DBG_STATS_RX_REMOTE_RING_BUFFER_INFO:
4277*5113495bSYour Name 				bytes = sizeof(struct
4278*5113495bSYour Name 						rx_remote_buffer_mgmt_stats);
4279*5113495bSYour Name 				if (req->base.copy.buf) {
4280*5113495bSYour Name 					int limit;
4281*5113495bSYour Name 
4282*5113495bSYour Name 					limit = sizeof(struct
4283*5113495bSYour Name 						rx_remote_buffer_mgmt_stats);
4284*5113495bSYour Name 					if (req->base.copy.byte_limit < limit)
4285*5113495bSYour Name 						limit = req->base.copy.
4286*5113495bSYour Name 							byte_limit;
4287*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4288*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, limit);
4289*5113495bSYour Name 				}
4290*5113495bSYour Name 				break;
4291*5113495bSYour Name 
4292*5113495bSYour Name 			case HTT_DBG_STATS_TXBF_INFO:
4293*5113495bSYour Name 				bytes = sizeof(struct wlan_dbg_txbf_data_stats);
4294*5113495bSYour Name 				if (req->base.copy.buf) {
4295*5113495bSYour Name 					int limit;
4296*5113495bSYour Name 
4297*5113495bSYour Name 					limit = sizeof(struct
4298*5113495bSYour Name 						wlan_dbg_txbf_data_stats);
4299*5113495bSYour Name 					if (req->base.copy.byte_limit < limit)
4300*5113495bSYour Name 						limit = req->base.copy.
4301*5113495bSYour Name 							byte_limit;
4302*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4303*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, limit);
4304*5113495bSYour Name 				}
4305*5113495bSYour Name 				break;
4306*5113495bSYour Name 
4307*5113495bSYour Name 			case HTT_DBG_STATS_SND_INFO:
4308*5113495bSYour Name 				bytes = sizeof(struct wlan_dbg_txbf_snd_stats);
4309*5113495bSYour Name 				if (req->base.copy.buf) {
4310*5113495bSYour Name 					int limit;
4311*5113495bSYour Name 
4312*5113495bSYour Name 					limit = sizeof(struct
4313*5113495bSYour Name 						wlan_dbg_txbf_snd_stats);
4314*5113495bSYour Name 					if (req->base.copy.byte_limit < limit)
4315*5113495bSYour Name 						limit = req->base.copy.
4316*5113495bSYour Name 							byte_limit;
4317*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4318*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, limit);
4319*5113495bSYour Name 				}
4320*5113495bSYour Name 				break;
4321*5113495bSYour Name 
4322*5113495bSYour Name 			case HTT_DBG_STATS_TX_SELFGEN_INFO:
4323*5113495bSYour Name 				bytes = sizeof(struct
4324*5113495bSYour Name 					wlan_dbg_tx_selfgen_stats);
4325*5113495bSYour Name 				if (req->base.copy.buf) {
4326*5113495bSYour Name 					int limit;
4327*5113495bSYour Name 
4328*5113495bSYour Name 					limit = sizeof(struct
4329*5113495bSYour Name 						wlan_dbg_tx_selfgen_stats);
4330*5113495bSYour Name 					if (req->base.copy.byte_limit < limit)
4331*5113495bSYour Name 						limit = req->base.copy.
4332*5113495bSYour Name 							byte_limit;
4333*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4334*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, limit);
4335*5113495bSYour Name 				}
4336*5113495bSYour Name 				break;
4337*5113495bSYour Name 
4338*5113495bSYour Name 			case HTT_DBG_STATS_ERROR_INFO:
4339*5113495bSYour Name 				bytes =
4340*5113495bSYour Name 				  sizeof(struct wlan_dbg_wifi2_error_stats);
4341*5113495bSYour Name 				if (req->base.copy.buf) {
4342*5113495bSYour Name 					int limit;
4343*5113495bSYour Name 
4344*5113495bSYour Name 					limit = sizeof(struct
4345*5113495bSYour Name 						wlan_dbg_wifi2_error_stats);
4346*5113495bSYour Name 					if (req->base.copy.byte_limit < limit)
4347*5113495bSYour Name 						limit = req->base.copy.
4348*5113495bSYour Name 							byte_limit;
4349*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4350*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, limit);
4351*5113495bSYour Name 				}
4352*5113495bSYour Name 				break;
4353*5113495bSYour Name 
4354*5113495bSYour Name 			case HTT_DBG_STATS_TXBF_MUSU_NDPA_PKT:
4355*5113495bSYour Name 				bytes =
4356*5113495bSYour Name 				  sizeof(struct rx_txbf_musu_ndpa_pkts_stats);
4357*5113495bSYour Name 				if (req->base.copy.buf) {
4358*5113495bSYour Name 					int limit;
4359*5113495bSYour Name 
4360*5113495bSYour Name 					limit = sizeof(struct
4361*5113495bSYour Name 						rx_txbf_musu_ndpa_pkts_stats);
4362*5113495bSYour Name 					if (req->base.copy.byte_limit < limit)
4363*5113495bSYour Name 						limit =
4364*5113495bSYour Name 						req->base.copy.byte_limit;
4365*5113495bSYour Name 					buf = req->base.copy.buf + req->offset;
4366*5113495bSYour Name 					qdf_mem_copy(buf, stats_data, limit);
4367*5113495bSYour Name 				}
4368*5113495bSYour Name 				break;
4369*5113495bSYour Name 
4370*5113495bSYour Name 			default:
4371*5113495bSYour Name 				break;
4372*5113495bSYour Name 			}
4373*5113495bSYour Name 			buf = req->base.copy.buf ?
4374*5113495bSYour Name 				req->base.copy.buf : stats_data;
4375*5113495bSYour Name 
4376*5113495bSYour Name 			/* Not implemented for MCL */
4377*5113495bSYour Name 			if (req->base.callback.fp)
4378*5113495bSYour Name 				req->base.callback.fp(req->base.callback.ctxt,
4379*5113495bSYour Name 						      cmn_type, buf, bytes);
4380*5113495bSYour Name 		}
4381*5113495bSYour Name 		stats_info_list += length;
4382*5113495bSYour Name 	} while (1);
4383*5113495bSYour Name 
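	/*
	 * Only a completed series (no PARTIAL record seen) retires the
	 * request: unlink it from pdev->req_list and free the non-transient
	 * copy that was allocated when the request was issued.
	 */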
4384*5113495bSYour Name 	if (!more) {
4385*5113495bSYour Name 		qdf_spin_lock_bh(&pdev->req_list_spinlock);
4386*5113495bSYour Name 		TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
4387*5113495bSYour Name 			if (req == tmp) {
4388*5113495bSYour Name 				TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
4389*5113495bSYour Name 				pdev->req_list_depth--;
4390*5113495bSYour Name 				qdf_mem_free(req);
4391*5113495bSYour Name 				break;
4392*5113495bSYour Name 			}
4393*5113495bSYour Name 		}
4394*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->req_list_spinlock);
4395*5113495bSYour Name 	}
4396*5113495bSYour Name }
4397*5113495bSYour Name 
4398*5113495bSYour Name #ifndef ATH_PERF_PWR_OFFLOAD /*---------------------------------------------*/
4399*5113495bSYour Name int ol_txrx_debug(ol_txrx_vdev_handle vdev, int debug_specs)
4400*5113495bSYour Name {
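	/*
	 * debug_specs is a bitmask of TXRX_DBG_MASK_* flags, so several of
	 * the displays below can be requested with a single call.
	 */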
4401*5113495bSYour Name 	if (debug_specs & TXRX_DBG_MASK_OBJS) {
4402*5113495bSYour Name #if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4403*5113495bSYour Name 		ol_txrx_pdev_display(vdev->pdev, 0);
4404*5113495bSYour Name #else
4405*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
4406*5113495bSYour Name 			  "The pdev,vdev,peer display functions are disabled.\n To enable them, recompile with TXRX_DEBUG_LEVEL > 5");
4407*5113495bSYour Name #endif
4408*5113495bSYour Name 	}
4409*5113495bSYour Name 	if (debug_specs & TXRX_DBG_MASK_STATS)
4410*5113495bSYour Name 		ol_txrx_stats_display(vdev->pdev,
4411*5113495bSYour Name 				      QDF_STATS_VERBOSITY_LEVEL_HIGH);
4412*5113495bSYour Name 	if (debug_specs & TXRX_DBG_MASK_PROT_ANALYZE) {
4413*5113495bSYour Name #if defined(ENABLE_TXRX_PROT_ANALYZE)
4414*5113495bSYour Name 		ol_txrx_prot_ans_display(vdev->pdev);
4415*5113495bSYour Name #else
4416*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
4417*5113495bSYour Name 			  "txrx protocol analysis is disabled.\n To enable it, recompile with ENABLE_TXRX_PROT_ANALYZE defined");
4418*5113495bSYour Name #endif
4419*5113495bSYour Name 	}
4420*5113495bSYour Name 	if (debug_specs & TXRX_DBG_MASK_RX_REORDER_TRACE) {
4421*5113495bSYour Name #if defined(ENABLE_RX_REORDER_TRACE)
4422*5113495bSYour Name 		ol_rx_reorder_trace_display(vdev->pdev, 0, 0);
4423*5113495bSYour Name #else
4424*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_FATAL,
4425*5113495bSYour Name 			  "rx reorder seq num trace is disabled.\n To enable it, recompile with ENABLE_RX_REORDER_TRACE defined");
4426*5113495bSYour Name #endif
4427*5113495bSYour Name 
4428*5113495bSYour Name 	}
4429*5113495bSYour Name 	return 0;
4430*5113495bSYour Name }
4431*5113495bSYour Name #endif
4432*5113495bSYour Name 
4433*5113495bSYour Name #ifdef currently_unused
4434*5113495bSYour Name int ol_txrx_aggr_cfg(ol_txrx_vdev_handle vdev,
4435*5113495bSYour Name 		     int max_subfrms_ampdu, int max_subfrms_amsdu)
4436*5113495bSYour Name {
4437*5113495bSYour Name 	return htt_h2t_aggr_cfg_msg(vdev->pdev->htt_pdev,
4438*5113495bSYour Name 				    max_subfrms_ampdu, max_subfrms_amsdu);
4439*5113495bSYour Name }
4440*5113495bSYour Name #endif
4441*5113495bSYour Name 
4442*5113495bSYour Name #if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
4443*5113495bSYour Name void ol_txrx_pdev_display(ol_txrx_pdev_handle pdev, int indent)
4444*5113495bSYour Name {
4445*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev;
4446*5113495bSYour Name 
4447*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4448*5113495bSYour Name 		  "%*s%s:\n", indent, " ", "txrx pdev");
4449*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4450*5113495bSYour Name 		  "%*spdev object: %pK", indent + 4, " ", pdev);
4451*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4452*5113495bSYour Name 		  "%*svdev list:", indent + 4, " ");
4453*5113495bSYour Name 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4454*5113495bSYour Name 		ol_txrx_vdev_display(vdev, indent + 8);
4455*5113495bSYour Name 	}
4456*5113495bSYour Name 	ol_txrx_peer_find_display(pdev, indent + 4);
4457*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4458*5113495bSYour Name 		  "%*stx desc pool: %d elems @ %pK", indent + 4, " ",
4459*5113495bSYour Name 		  pdev->tx_desc.pool_size, pdev->tx_desc.array);
4460*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW, " ");
4461*5113495bSYour Name 	htt_display(pdev->htt_pdev, indent);
4462*5113495bSYour Name }
4463*5113495bSYour Name 
4464*5113495bSYour Name void ol_txrx_vdev_display(ol_txrx_vdev_handle vdev, int indent)
4465*5113495bSYour Name {
4466*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
4467*5113495bSYour Name 
4468*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4469*5113495bSYour Name 		  "%*stxrx vdev: %pK\n", indent, " ", vdev);
4470*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4471*5113495bSYour Name 		  "%*sID: %d\n", indent + 4, " ", vdev->vdev_id);
4472*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4473*5113495bSYour Name 		  "%*sMAC addr: %d:%d:%d:%d:%d:%d",
4474*5113495bSYour Name 		  indent + 4, " ",
4475*5113495bSYour Name 		  vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
4476*5113495bSYour Name 		  vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
4477*5113495bSYour Name 		  vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
4478*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4479*5113495bSYour Name 		  "%*speer list:", indent + 4, " ");
4480*5113495bSYour Name 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
4481*5113495bSYour Name 		ol_txrx_peer_display(peer, indent + 8);
4482*5113495bSYour Name 	}
4483*5113495bSYour Name }
4484*5113495bSYour Name 
4485*5113495bSYour Name void ol_txrx_peer_display(ol_txrx_peer_handle peer, int indent)
4486*5113495bSYour Name {
4487*5113495bSYour Name 	int i;
4488*5113495bSYour Name 
4489*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4490*5113495bSYour Name 		  "%*stxrx peer: %pK", indent, " ", peer);
4491*5113495bSYour Name 	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
4492*5113495bSYour Name 		if (peer->peer_ids[i] != HTT_INVALID_PEER) {
4493*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
4494*5113495bSYour Name 				  "%*sID: %d", indent + 4, " ",
4495*5113495bSYour Name 				  peer->peer_ids[i]);
4496*5113495bSYour Name 		}
4497*5113495bSYour Name 	}
4498*5113495bSYour Name }
4499*5113495bSYour Name #endif /* TXRX_DEBUG_LEVEL */
4500*5113495bSYour Name 
4501*5113495bSYour Name /**
4502*5113495bSYour Name  * ol_txrx_stats() - write a snapshot of the vdev's tx ll-queue stats to a buffer
4503*5113495bSYour Name  * @vdev_id: vdev id
4504*5113495bSYour Name  * @buffer: pointer to buffer
4505*5113495bSYour Name  * @buf_len: length of the buffer
4506*5113495bSYour Name  *
4507*5113495bSYour Name  * Return: length of string
4508*5113495bSYour Name  */
4509*5113495bSYour Name static int
4510*5113495bSYour Name ol_txrx_stats(uint8_t vdev_id, char *buffer, unsigned int buf_len)
4511*5113495bSYour Name {
4512*5113495bSYour Name 	uint32_t len = 0;
4513*5113495bSYour Name 
4514*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev =
4515*5113495bSYour Name 			(struct ol_txrx_vdev_t *)
4516*5113495bSYour Name 			ol_txrx_get_vdev_from_vdev_id(vdev_id);
4517*5113495bSYour Name 
4518*5113495bSYour Name 	if (!vdev) {
4519*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4520*5113495bSYour Name 			  "%s: vdev is NULL", __func__);
4521*5113495bSYour Name 		snprintf(buffer, buf_len, "vdev not found");
4522*5113495bSYour Name 		return len;
4523*5113495bSYour Name 	}
4524*5113495bSYour Name 
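	/*
	 * scnprintf() returns the number of characters actually written,
	 * excluding the trailing NUL, so the reported length stays correct
	 * even when the snapshot is truncated to buf_len.
	 */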
4525*5113495bSYour Name 	len = scnprintf(buffer, buf_len,
4526*5113495bSYour Name 			"\n\nTXRX stats:\nllQueue State : %s\npause %u unpause %u\noverflow %u\nllQueue timer state : %s",
4527*5113495bSYour Name 			((vdev->ll_pause.is_q_paused == false) ?
4528*5113495bSYour Name 			 "UNPAUSED" : "PAUSED"),
4529*5113495bSYour Name 			vdev->ll_pause.q_pause_cnt,
4530*5113495bSYour Name 			vdev->ll_pause.q_unpause_cnt,
4531*5113495bSYour Name 			vdev->ll_pause.q_overflow_cnt,
4532*5113495bSYour Name 			((vdev->ll_pause.is_q_timer_on == false)
4533*5113495bSYour Name 			 ? "NOT-RUNNING" : "RUNNING"));
4534*5113495bSYour Name 	return len;
4535*5113495bSYour Name }
4536*5113495bSYour Name 
4537*5113495bSYour Name #ifdef QCA_SUPPORT_TXRX_LOCAL_PEER_ID
4538*5113495bSYour Name /**
4539*5113495bSYour Name  * ol_txrx_disp_peer_cached_bufq_stats() - display peer cached_bufq stats
4540*5113495bSYour Name  * @peer: peer pointer
4541*5113495bSYour Name  *
4542*5113495bSYour Name  * Return: None
4543*5113495bSYour Name  */
4544*5113495bSYour Name static void ol_txrx_disp_peer_cached_bufq_stats(struct ol_txrx_peer_t *peer)
4545*5113495bSYour Name {
4546*5113495bSYour Name 	txrx_nofl_info("cached_bufq: curr %d drops %d hwm %d whatifs %d thresh %d",
4547*5113495bSYour Name 		       peer->bufq_info.curr,
4548*5113495bSYour Name 		       peer->bufq_info.dropped,
4549*5113495bSYour Name 		       peer->bufq_info.high_water_mark,
4550*5113495bSYour Name 		       peer->bufq_info.qdepth_no_thresh,
4551*5113495bSYour Name 		       peer->bufq_info.thresh);
4552*5113495bSYour Name }
4553*5113495bSYour Name 
4554*5113495bSYour Name /**
4555*5113495bSYour Name  * ol_txrx_disp_peer_stats() - display peer stats
4556*5113495bSYour Name  * @pdev: pdev pointer
4557*5113495bSYour Name  *
4558*5113495bSYour Name  * Return: None
4559*5113495bSYour Name  */
4560*5113495bSYour Name static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4561*5113495bSYour Name {
	int i;
4562*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
4563*5113495bSYour Name 	struct hif_opaque_softc *osc =  cds_get_context(QDF_MODULE_ID_HIF);
4564*5113495bSYour Name 
4565*5113495bSYour Name 	if (osc && hif_is_load_or_unload_in_progress(HIF_GET_SOFTC(osc)))
4566*5113495bSYour Name 		return;
4567*5113495bSYour Name 
4568*5113495bSYour Name 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
4569*5113495bSYour Name 		qdf_spin_lock_bh(&pdev->peer_ref_mutex);
4570*5113495bSYour Name 		qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
4571*5113495bSYour Name 		peer = pdev->local_peer_ids.map[i];
4572*5113495bSYour Name 		if (peer) {
4573*5113495bSYour Name 			ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
4574*5113495bSYour Name 		}
4575*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
4576*5113495bSYour Name 		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
4577*5113495bSYour Name 
4578*5113495bSYour Name 		if (peer) {
4579*5113495bSYour Name 			txrx_nofl_info("stats: peer 0x%pK local peer id %d",
4580*5113495bSYour Name 				       peer, i);
4581*5113495bSYour Name 			ol_txrx_disp_peer_cached_bufq_stats(peer);
4582*5113495bSYour Name 			ol_txrx_peer_release_ref(peer,
4583*5113495bSYour Name 						 PEER_DEBUG_ID_OL_INTERNAL);
4584*5113495bSYour Name 		}
4585*5113495bSYour Name 	}
4586*5113495bSYour Name }
4587*5113495bSYour Name #else
4588*5113495bSYour Name static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
4589*5113495bSYour Name {
4590*5113495bSYour Name 	txrx_nofl_info("peer stats not supported w/o QCA_SUPPORT_TXRX_LOCAL_PEER_ID");
4591*5113495bSYour Name }
4592*5113495bSYour Name #endif
4593*5113495bSYour Name 
4594*5113495bSYour Name void ol_txrx_stats_display(ol_txrx_pdev_handle pdev,
4595*5113495bSYour Name 			   enum qdf_stats_verbosity_level level)
4596*5113495bSYour Name {
4597*5113495bSYour Name 	u64 tx_dropped =
4598*5113495bSYour Name 		pdev->stats.pub.tx.dropped.download_fail.pkts
4599*5113495bSYour Name 		  + pdev->stats.pub.tx.dropped.target_discard.pkts
4600*5113495bSYour Name 		  + pdev->stats.pub.tx.dropped.no_ack.pkts
4601*5113495bSYour Name 		  + pdev->stats.pub.tx.dropped.target_drop.pkts
4602*5113495bSYour Name 		  + pdev->stats.pub.tx.dropped.others.pkts;
4603*5113495bSYour Name 
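	/*
	 * At low verbosity, emit one compact line.  The "%u-" key printed
	 * before each tx drop counter is the matching htt_tx_status_* code,
	 * so the line can be decoded without the verbose legend below.
	 */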
4604*5113495bSYour Name 	if (level == QDF_STATS_VERBOSITY_LEVEL_LOW) {
4605*5113495bSYour Name 		txrx_nofl_dbg("STATS |%u %u|TX: %lld tso %lld ok %lld drops(%u-%lld %u-%lld %u-%lld %u-%lld ?-%lld hR-%lld)|RX: %lld drops(E %lld PI %lld ME %lld) fwd(S %d F %d SF %d)|",
4606*5113495bSYour Name 			      pdev->tx_desc.num_free,
4607*5113495bSYour Name 			      pdev->tx_desc.pool_size,
4608*5113495bSYour Name 			      pdev->stats.pub.tx.from_stack.pkts,
4609*5113495bSYour Name 			      pdev->stats.pub.tx.tso.tso_pkts.pkts,
4610*5113495bSYour Name 			      pdev->stats.pub.tx.delivered.pkts,
4611*5113495bSYour Name 			      htt_tx_status_download_fail,
4612*5113495bSYour Name 			      pdev->stats.pub.tx.dropped.download_fail.pkts,
4613*5113495bSYour Name 			      htt_tx_status_discard,
4614*5113495bSYour Name 			      pdev->stats.pub.tx.dropped.
4615*5113495bSYour Name 					target_discard.pkts,
4616*5113495bSYour Name 			      htt_tx_status_no_ack,
4617*5113495bSYour Name 			      pdev->stats.pub.tx.dropped.no_ack.pkts,
4618*5113495bSYour Name 			      htt_tx_status_drop,
4619*5113495bSYour Name 			      pdev->stats.pub.tx.dropped.target_drop.pkts,
4620*5113495bSYour Name 			      pdev->stats.pub.tx.dropped.others.pkts,
4621*5113495bSYour Name 			      pdev->stats.pub.tx.dropped.host_reject.pkts,
4622*5113495bSYour Name 			      pdev->stats.pub.rx.delivered.pkts,
4623*5113495bSYour Name 			      pdev->stats.pub.rx.dropped_err.pkts,
4624*5113495bSYour Name 			      pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4625*5113495bSYour Name 			      pdev->stats.pub.rx.dropped_mic_err.pkts,
4626*5113495bSYour Name 			      pdev->stats.pub.rx.intra_bss_fwd.
4627*5113495bSYour Name 					packets_stack,
4628*5113495bSYour Name 			      pdev->stats.pub.rx.intra_bss_fwd.
4629*5113495bSYour Name 					packets_fwd,
4630*5113495bSYour Name 			      pdev->stats.pub.rx.intra_bss_fwd.
4631*5113495bSYour Name 					packets_stack_n_fwd);
4632*5113495bSYour Name 		return;
4633*5113495bSYour Name 	}
4634*5113495bSYour Name 
4635*5113495bSYour Name 	txrx_nofl_info("TX PATH Statistics:");
4636*5113495bSYour Name 	txrx_nofl_info("sent %lld msdus (%lld B), host rejected %lld (%lld B), dropped %lld (%lld B)",
4637*5113495bSYour Name 		       pdev->stats.pub.tx.from_stack.pkts,
4638*5113495bSYour Name 		       pdev->stats.pub.tx.from_stack.bytes,
4639*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.host_reject.pkts,
4640*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.host_reject.bytes,
4641*5113495bSYour Name 		       tx_dropped,
4642*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.download_fail.bytes
4643*5113495bSYour Name 			  + pdev->stats.pub.tx.dropped.target_discard.bytes
4644*5113495bSYour Name 			  + pdev->stats.pub.tx.dropped.target_drop.bytes
4645*5113495bSYour Name 			  + pdev->stats.pub.tx.dropped.no_ack.bytes
			  + pdev->stats.pub.tx.dropped.others.bytes);
4646*5113495bSYour Name 	txrx_nofl_info("successfully delivered: %lld (%lld B), download fail: %lld (%lld B), target discard: %lld (%lld B), no ack: %lld (%lld B), target drop: %lld (%lld B), others: %lld (%lld B)",
4647*5113495bSYour Name 		       pdev->stats.pub.tx.delivered.pkts,
4648*5113495bSYour Name 		       pdev->stats.pub.tx.delivered.bytes,
4649*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.download_fail.pkts,
4650*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.download_fail.bytes,
4651*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.target_discard.pkts,
4652*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.target_discard.bytes,
4653*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.no_ack.pkts,
4654*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.no_ack.bytes,
4655*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.target_drop.pkts,
4656*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.target_drop.bytes,
4657*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.others.pkts,
4658*5113495bSYour Name 		       pdev->stats.pub.tx.dropped.others.bytes);
4659*5113495bSYour Name 	txrx_nofl_info("Tx completions per HTT message:\n"
4660*5113495bSYour Name 		       "Single Packet  %d\n"
4661*5113495bSYour Name 		       " 2-10 Packets  %d\n"
4662*5113495bSYour Name 		       "11-20 Packets  %d\n"
4663*5113495bSYour Name 		       "21-30 Packets  %d\n"
4664*5113495bSYour Name 		       "31-40 Packets  %d\n"
4665*5113495bSYour Name 		       "41-50 Packets  %d\n"
4666*5113495bSYour Name 		       "51-60 Packets  %d\n"
4667*5113495bSYour Name 		       "  60+ Packets  %d\n",
4668*5113495bSYour Name 		       pdev->stats.pub.tx.comp_histogram.pkts_1,
4669*5113495bSYour Name 		       pdev->stats.pub.tx.comp_histogram.pkts_2_10,
4670*5113495bSYour Name 		       pdev->stats.pub.tx.comp_histogram.pkts_11_20,
4671*5113495bSYour Name 		       pdev->stats.pub.tx.comp_histogram.pkts_21_30,
4672*5113495bSYour Name 		       pdev->stats.pub.tx.comp_histogram.pkts_31_40,
4673*5113495bSYour Name 		       pdev->stats.pub.tx.comp_histogram.pkts_41_50,
4674*5113495bSYour Name 		       pdev->stats.pub.tx.comp_histogram.pkts_51_60,
4675*5113495bSYour Name 		       pdev->stats.pub.tx.comp_histogram.pkts_61_plus);
4676*5113495bSYour Name 
4677*5113495bSYour Name 	txrx_nofl_info("RX PATH Statistics:");
4678*5113495bSYour Name 	txrx_nofl_info("%lld ppdus, %lld mpdus, %lld msdus, %lld bytes\n"
4679*5113495bSYour Name 		       "dropped: err %lld (%lld B), peer_invalid %lld (%lld B), mic_err %lld (%lld B)\n"
4680*5113495bSYour Name 		       "msdus with frag_ind: %d msdus with offload_ind: %d",
4681*5113495bSYour Name 		       pdev->stats.priv.rx.normal.ppdus,
4682*5113495bSYour Name 		       pdev->stats.priv.rx.normal.mpdus,
4683*5113495bSYour Name 		       pdev->stats.pub.rx.delivered.pkts,
4684*5113495bSYour Name 		       pdev->stats.pub.rx.delivered.bytes,
4685*5113495bSYour Name 		       pdev->stats.pub.rx.dropped_err.pkts,
4686*5113495bSYour Name 		       pdev->stats.pub.rx.dropped_err.bytes,
4687*5113495bSYour Name 		       pdev->stats.pub.rx.dropped_peer_invalid.pkts,
4688*5113495bSYour Name 		       pdev->stats.pub.rx.dropped_peer_invalid.bytes,
4689*5113495bSYour Name 		       pdev->stats.pub.rx.dropped_mic_err.pkts,
4690*5113495bSYour Name 		       pdev->stats.pub.rx.dropped_mic_err.bytes,
4691*5113495bSYour Name 		       pdev->stats.pub.rx.msdus_with_frag_ind,
4692*5113495bSYour Name 		       pdev->stats.pub.rx.msdus_with_offload_ind);
4693*5113495bSYour Name 
4694*5113495bSYour Name 	txrx_nofl_info("  fwd to stack %d, fwd to fw %d, fwd to stack & fw  %d\n",
4695*5113495bSYour Name 		       pdev->stats.pub.rx.intra_bss_fwd.packets_stack,
4696*5113495bSYour Name 		       pdev->stats.pub.rx.intra_bss_fwd.packets_fwd,
4697*5113495bSYour Name 		       pdev->stats.pub.rx.intra_bss_fwd.packets_stack_n_fwd);
4698*5113495bSYour Name 
4699*5113495bSYour Name 	txrx_nofl_info("packets per HTT message:\n"
4700*5113495bSYour Name 		       "Single Packet  %d\n"
4701*5113495bSYour Name 		       " 2-10 Packets  %d\n"
4702*5113495bSYour Name 		       "11-20 Packets  %d\n"
4703*5113495bSYour Name 		       "21-30 Packets  %d\n"
4704*5113495bSYour Name 		       "31-40 Packets  %d\n"
4705*5113495bSYour Name 		       "41-50 Packets  %d\n"
4706*5113495bSYour Name 		       "51-60 Packets  %d\n"
4707*5113495bSYour Name 		       "  60+ Packets  %d\n",
4708*5113495bSYour Name 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_1,
4709*5113495bSYour Name 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_2_10,
4710*5113495bSYour Name 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_11_20,
4711*5113495bSYour Name 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_21_30,
4712*5113495bSYour Name 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_31_40,
4713*5113495bSYour Name 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_41_50,
4714*5113495bSYour Name 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_51_60,
4715*5113495bSYour Name 		       pdev->stats.pub.rx.rx_ind_histogram.pkts_61_plus);
4716*5113495bSYour Name 
4717*5113495bSYour Name 	ol_txrx_disp_peer_stats(pdev);
4718*5113495bSYour Name }
4719*5113495bSYour Name 
4720*5113495bSYour Name void ol_txrx_stats_clear(ol_txrx_pdev_handle pdev)
4721*5113495bSYour Name {
4722*5113495bSYour Name 	qdf_mem_zero(&pdev->stats, sizeof(pdev->stats));
4723*5113495bSYour Name }
4724*5113495bSYour Name 
4725*5113495bSYour Name #if defined(ENABLE_TXRX_PROT_ANALYZE)
4726*5113495bSYour Name 
4727*5113495bSYour Name void ol_txrx_prot_ans_display(ol_txrx_pdev_handle pdev)
4728*5113495bSYour Name {
4729*5113495bSYour Name 	ol_txrx_prot_an_display(pdev->prot_an_tx_sent);
4730*5113495bSYour Name 	ol_txrx_prot_an_display(pdev->prot_an_rx_sent);
4731*5113495bSYour Name }
4732*5113495bSYour Name 
4733*5113495bSYour Name #endif /* ENABLE_TXRX_PROT_ANALYZE */
4734*5113495bSYour Name 
4735*5113495bSYour Name #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI
4736*5113495bSYour Name int16_t ol_txrx_peer_rssi(ol_txrx_peer_handle peer)
4737*5113495bSYour Name {
4738*5113495bSYour Name 	return (peer->rssi_dbm == HTT_RSSI_INVALID) ?
4739*5113495bSYour Name 	       OL_TXRX_RSSI_INVALID : peer->rssi_dbm;
4740*5113495bSYour Name }
4741*5113495bSYour Name #endif /* #ifdef QCA_SUPPORT_PEER_DATA_RX_RSSI */
4742*5113495bSYour Name 
4743*5113495bSYour Name #ifdef QCA_ENABLE_OL_TXRX_PEER_STATS
4744*5113495bSYour Name A_STATUS
4745*5113495bSYour Name ol_txrx_peer_stats_copy(ol_txrx_pdev_handle pdev,
4746*5113495bSYour Name 			ol_txrx_peer_handle peer, ol_txrx_peer_stats_t *stats)
4747*5113495bSYour Name {
4748*5113495bSYour Name 	qdf_assert(pdev && peer && stats);
4749*5113495bSYour Name 	qdf_spin_lock_bh(&pdev->peer_stat_mutex);
4750*5113495bSYour Name 	qdf_mem_copy(stats, &peer->stats, sizeof(*stats));
4751*5113495bSYour Name 	qdf_spin_unlock_bh(&pdev->peer_stat_mutex);
4752*5113495bSYour Name 	return A_OK;
4753*5113495bSYour Name }
4754*5113495bSYour Name #endif /* QCA_ENABLE_OL_TXRX_PEER_STATS */
4755*5113495bSYour Name 
4756*5113495bSYour Name /**
4757*5113495bSYour Name  * ol_vdev_rx_set_intrabss_fwd() - set the vdev's intra-BSS forwarding disable flag
4758*5113495bSYour Name  *
4759*5113495bSYour Name  * @soc_hdl: datapath soc handle
4760*5113495bSYour Name  * @vdev_id: virtual interface id
4761*5113495bSYour Name  * @val: true to disable intra-BSS rx forwarding, false to enable it
4762*5113495bSYour Name  *
4763*5113495bSYour Name  * Return: void
4764*5113495bSYour Name  */
4765*5113495bSYour Name static void ol_vdev_rx_set_intrabss_fwd(struct cdp_soc_t *soc_hdl,
4766*5113495bSYour Name 					uint8_t vdev_id, bool val)
4767*5113495bSYour Name {
4768*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
4769*5113495bSYour Name 	ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc,
4770*5113495bSYour Name 								     vdev_id);
4771*5113495bSYour Name 
4772*5113495bSYour Name 	if (!vdev)
4773*5113495bSYour Name 		return;
4774*5113495bSYour Name 
4775*5113495bSYour Name 	vdev->disable_intrabss_fwd = val;
4776*5113495bSYour Name }
4777*5113495bSYour Name 
4778*5113495bSYour Name /**
4779*5113495bSYour Name  * ol_txrx_update_mac_id() - update mac_id for vdev
4780*5113495bSYour Name  * @soc_hdl: Datapath soc handle
4781*5113495bSYour Name  * @vdev_id: vdev id
4782*5113495bSYour Name  * @mac_id: mac id
4783*5113495bSYour Name  *
4784*5113495bSYour Name  * Return: none
4785*5113495bSYour Name  */
4786*5113495bSYour Name static void ol_txrx_update_mac_id(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4787*5113495bSYour Name 				  uint8_t mac_id)
4788*5113495bSYour Name {
4789*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev =
4790*5113495bSYour Name 			(struct ol_txrx_vdev_t *)
4791*5113495bSYour Name 			ol_txrx_get_vdev_from_vdev_id(vdev_id);
4792*5113495bSYour Name 
4793*5113495bSYour Name 	if (!vdev) {
4794*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4795*5113495bSYour Name 			  "%s: Invalid vdev_id %d", __func__, vdev_id);
4796*5113495bSYour Name 		return;
4797*5113495bSYour Name 	}
4798*5113495bSYour Name 	vdev->mac_id = mac_id;
4799*5113495bSYour Name }
4800*5113495bSYour Name 
4801*5113495bSYour Name /**
4802*5113495bSYour Name  * ol_txrx_get_tx_ack_stats() - get tx ack count
4803*5113495bSYour Name  * @soc_hdl: Datapath soc handle
4804*5113495bSYour Name  * @vdev_id: vdev_id
4805*5113495bSYour Name  *
4806*5113495bSYour Name  * Return: tx ack count
4807*5113495bSYour Name  */
4808*5113495bSYour Name static uint32_t ol_txrx_get_tx_ack_stats(struct cdp_soc_t *soc_hdl,
4809*5113495bSYour Name 					 uint8_t vdev_id)
4810*5113495bSYour Name {
4811*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev =
4812*5113495bSYour Name 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
4813*5113495bSYour Name 
4814*5113495bSYour Name 	if (!vdev) {
4815*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4816*5113495bSYour Name 			  "%s: Invalid vdev_id %d", __func__, vdev_id);
4817*5113495bSYour Name 		return 0;
4818*5113495bSYour Name 	}
4819*5113495bSYour Name 
4820*5113495bSYour Name 	return vdev->txrx_stats.txack_success;
4821*5113495bSYour Name }
4822*5113495bSYour Name 
4823*5113495bSYour Name /**
4824*5113495bSYour Name  * ol_txrx_display_stats() - Display OL TXRX display stats
4825*5113495bSYour Name  * @soc_hdl: Datapath soc handle
4826*5113495bSYour Name  * @value: Module id for which stats need to be displayed
4827*5113495bSYour Name  * @verb_level: verbose level of stats to be displayed
4828*5113495bSYour Name  *
4829*5113495bSYour Name  * Return: status
4830*5113495bSYour Name  */
4831*5113495bSYour Name static QDF_STATUS
4832*5113495bSYour Name ol_txrx_display_stats(struct cdp_soc_t *soc_hdl, uint16_t value,
4833*5113495bSYour Name 		      enum qdf_stats_verbosity_level verb_level)
4834*5113495bSYour Name {
4835*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
4836*5113495bSYour Name 	ol_txrx_pdev_handle pdev =
4837*5113495bSYour Name 		ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
4839*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4840*5113495bSYour Name 
4841*5113495bSYour Name 	if (!pdev) {
4842*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4843*5113495bSYour Name 			  "%s: pdev is NULL", __func__);
4844*5113495bSYour Name 		return QDF_STATUS_E_NULL_VALUE;
4845*5113495bSYour Name 	}
4846*5113495bSYour Name 
4847*5113495bSYour Name 	switch (value) {
4848*5113495bSYour Name 	case CDP_TXRX_PATH_STATS:
4849*5113495bSYour Name 		ol_txrx_stats_display(pdev, verb_level);
4850*5113495bSYour Name 		break;
4851*5113495bSYour Name 	case CDP_TXRX_TSO_STATS:
4852*5113495bSYour Name 		ol_txrx_stats_display_tso(pdev);
4853*5113495bSYour Name 		break;
4854*5113495bSYour Name 	case CDP_DUMP_TX_FLOW_POOL_INFO:
4855*5113495bSYour Name 		if (verb_level == QDF_STATS_VERBOSITY_LEVEL_LOW)
4856*5113495bSYour Name 			ol_tx_dump_flow_pool_info_compact(pdev);
4857*5113495bSYour Name 		else
4858*5113495bSYour Name 			ol_tx_dump_flow_pool_info(soc_hdl);
4859*5113495bSYour Name 		break;
4860*5113495bSYour Name 	case CDP_TXRX_DESC_STATS:
4861*5113495bSYour Name 		qdf_nbuf_tx_desc_count_display();
4862*5113495bSYour Name 		break;
4863*5113495bSYour Name 	case CDP_WLAN_RX_BUF_DEBUG_STATS:
4864*5113495bSYour Name 		htt_display_rx_buf_debug(pdev->htt_pdev);
4865*5113495bSYour Name 		break;
4866*5113495bSYour Name #ifdef CONFIG_HL_SUPPORT
4867*5113495bSYour Name 	case CDP_SCHEDULER_STATS:
4868*5113495bSYour Name 		ol_tx_sched_cur_state_display(pdev);
4869*5113495bSYour Name 		ol_tx_sched_stats_display(pdev);
4870*5113495bSYour Name 		break;
4871*5113495bSYour Name 	case CDP_TX_QUEUE_STATS:
4872*5113495bSYour Name 		ol_tx_queue_log_display(pdev);
4873*5113495bSYour Name 		break;
4874*5113495bSYour Name #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
4875*5113495bSYour Name 	case CDP_CREDIT_STATS:
4876*5113495bSYour Name 		ol_tx_dump_group_credit_stats(pdev);
4877*5113495bSYour Name 		break;
4878*5113495bSYour Name #endif
4879*5113495bSYour Name 
4880*5113495bSYour Name #ifdef DEBUG_HL_LOGGING
4881*5113495bSYour Name 	case CDP_BUNDLE_STATS:
4882*5113495bSYour Name 		htt_dump_bundle_stats(pdev->htt_pdev);
4883*5113495bSYour Name 		break;
4884*5113495bSYour Name #endif
4885*5113495bSYour Name #endif
4886*5113495bSYour Name 	default:
4887*5113495bSYour Name 		status = QDF_STATUS_E_INVAL;
4888*5113495bSYour Name 		break;
4889*5113495bSYour Name 	}
4890*5113495bSYour Name 	return status;
4891*5113495bSYour Name }
4892*5113495bSYour Name 
4893*5113495bSYour Name /**
4894*5113495bSYour Name  * ol_txrx_clear_stats() - Clear OL TXRX stats
4895*5113495bSYour Name  * @soc_hdl: Datapath soc handle
4896*5113495bSYour Name  * @pdev_id: pdev identifier
4897*5113495bSYour Name  * @value: Module id for which stats need to be cleared
4898*5113495bSYour Name  *
4899*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL otherwise
4900*5113495bSYour Name  */
4901*5113495bSYour Name static QDF_STATUS ol_txrx_clear_stats(struct cdp_soc_t *soc_hdl,
4902*5113495bSYour Name 				      uint8_t pdev_id, uint8_t value)
4903*5113495bSYour Name {
4904*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
4905*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
4906*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
4907*5113495bSYour Name 
4908*5113495bSYour Name 	if (!pdev) {
4909*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
4910*5113495bSYour Name 			  "%s: pdev is NULL", __func__);
4911*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
4912*5113495bSYour Name 	}
4913*5113495bSYour Name 
4914*5113495bSYour Name 	switch (value) {
4915*5113495bSYour Name 	case CDP_TXRX_PATH_STATS:
4916*5113495bSYour Name 		ol_txrx_stats_clear(pdev);
4917*5113495bSYour Name 		break;
4918*5113495bSYour Name 	case CDP_TXRX_TSO_STATS:
4919*5113495bSYour Name 		ol_txrx_tso_stats_clear(pdev);
4920*5113495bSYour Name 		break;
4921*5113495bSYour Name 	case CDP_DUMP_TX_FLOW_POOL_INFO:
4922*5113495bSYour Name 		ol_tx_clear_flow_pool_stats();
4923*5113495bSYour Name 		break;
4924*5113495bSYour Name 	case CDP_TXRX_DESC_STATS:
4925*5113495bSYour Name 		qdf_nbuf_tx_desc_count_clear();
4926*5113495bSYour Name 		break;
4927*5113495bSYour Name #ifdef CONFIG_HL_SUPPORT
4928*5113495bSYour Name 	case CDP_SCHEDULER_STATS:
4929*5113495bSYour Name 		ol_tx_sched_stats_clear(pdev);
4930*5113495bSYour Name 		break;
4931*5113495bSYour Name 	case CDP_TX_QUEUE_STATS:
4932*5113495bSYour Name 		ol_tx_queue_log_clear(pdev);
4933*5113495bSYour Name 		break;
4934*5113495bSYour Name #ifdef FEATURE_HL_GROUP_CREDIT_FLOW_CONTROL
4935*5113495bSYour Name 	case CDP_CREDIT_STATS:
4936*5113495bSYour Name 		ol_tx_clear_group_credit_stats(pdev);
4937*5113495bSYour Name 		break;
4938*5113495bSYour Name #endif
4939*5113495bSYour Name 	case CDP_BUNDLE_STATS:
4940*5113495bSYour Name 		htt_clear_bundle_stats(pdev->htt_pdev);
4941*5113495bSYour Name 		break;
4942*5113495bSYour Name #endif
4943*5113495bSYour Name 	default:
4944*5113495bSYour Name 		status = QDF_STATUS_E_INVAL;
4945*5113495bSYour Name 		break;
4946*5113495bSYour Name 	}
4947*5113495bSYour Name 
4948*5113495bSYour Name 	return status;
4949*5113495bSYour Name }
4950*5113495bSYour Name 
4951*5113495bSYour Name /**
4952*5113495bSYour Name  * ol_txrx_drop_nbuf_list() - drop an nbuf list
4953*5113495bSYour Name  * @buf_list: buffer list to be dropped
4954*5113495bSYour Name  *
4955*5113495bSYour Name  * Return: int (number of bufs dropped)
4956*5113495bSYour Name  */
4957*5113495bSYour Name static inline int ol_txrx_drop_nbuf_list(qdf_nbuf_t buf_list)
4958*5113495bSYour Name {
4959*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
4960*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
4961*5113495bSYour Name 	int num_dropped = 0;
4962*5113495bSYour Name 	qdf_nbuf_t buf, next_buf;
4963*5113495bSYour Name 
4964*5113495bSYour Name 	if (qdf_unlikely(!soc))
4965*5113495bSYour Name 		return 0;
4966*5113495bSYour Name 
4967*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
4968*5113495bSYour Name 	if (!pdev) {
4969*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
4970*5113495bSYour Name 		return 0;
4971*5113495bSYour Name 	}
4972*5113495bSYour Name 
4973*5113495bSYour Name 	buf = buf_list;
4974*5113495bSYour Name 	while (buf) {
4975*5113495bSYour Name 		QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
4976*5113495bSYour Name 		next_buf = qdf_nbuf_queue_next(buf);
4977*5113495bSYour Name 		TXRX_STATS_MSDU_INCR(pdev, rx.dropped_peer_invalid, buf);
4980*5113495bSYour Name 		qdf_nbuf_free(buf);
4981*5113495bSYour Name 		buf = next_buf;
4982*5113495bSYour Name 		num_dropped++;
4983*5113495bSYour Name 	}
4984*5113495bSYour Name 	return num_dropped;
4985*5113495bSYour Name }
4986*5113495bSYour Name 
4987*5113495bSYour Name /**
4988*5113495bSYour Name  * ol_rx_data_handler() - data rx handler
4989*5113495bSYour Name  * @pdev: dev handle
4990*5113495bSYour Name  * @buf_list: buffer list
4991*5113495bSYour Name  * @staid: Station id
4992*5113495bSYour Name  *
4993*5113495bSYour Name  * Return: None
4994*5113495bSYour Name  */
4995*5113495bSYour Name static void ol_rx_data_handler(struct ol_txrx_pdev_t *pdev,
4996*5113495bSYour Name 			       qdf_nbuf_t buf_list, uint16_t staid)
4997*5113495bSYour Name {
4998*5113495bSYour Name 	void *osif_dev;
4999*5113495bSYour Name 	uint8_t drop_count = 0;
5000*5113495bSYour Name 	qdf_nbuf_t buf, next_buf;
5001*5113495bSYour Name 	QDF_STATUS ret;
5002*5113495bSYour Name 	ol_txrx_rx_fp data_rx = NULL;
5003*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
5004*5113495bSYour Name 
5005*5113495bSYour Name 	if (qdf_unlikely(!pdev))
5006*5113495bSYour Name 		goto free_buf;
5007*5113495bSYour Name 
5008*5113495bSYour Name 	/* Do not use peer directly. Derive peer from staid to
5009*5113495bSYour Name 	 * make sure that peer is valid.
5010*5113495bSYour Name 	 */
5011*5113495bSYour Name 	peer = ol_txrx_peer_get_ref_by_local_id((struct cdp_pdev *)pdev,
5012*5113495bSYour Name 			staid, PEER_DEBUG_ID_OL_RX_THREAD);
5013*5113495bSYour Name 	if (!peer)
5014*5113495bSYour Name 		goto free_buf;
5015*5113495bSYour Name 
5016*5113495bSYour Name 	qdf_spin_lock_bh(&peer->peer_info_lock);
5017*5113495bSYour Name 	if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
5018*5113495bSYour Name 					 !peer->vdev->rx)) {
5019*5113495bSYour Name 		qdf_spin_unlock_bh(&peer->peer_info_lock);
5020*5113495bSYour Name 		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
5021*5113495bSYour Name 		goto free_buf;
5022*5113495bSYour Name 	}
5023*5113495bSYour Name 
5024*5113495bSYour Name 	data_rx = peer->vdev->rx;
5025*5113495bSYour Name 	osif_dev = peer->vdev->osif_dev;
5026*5113495bSYour Name 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5027*5113495bSYour Name 
5028*5113495bSYour Name 	qdf_spin_lock_bh(&peer->bufq_info.bufq_lock);
5029*5113495bSYour Name 	if (!list_empty(&peer->bufq_info.cached_bufq)) {
5030*5113495bSYour Name 		qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
5031*5113495bSYour Name 		/* Flush the cached frames to HDD before passing new rx frame */
5032*5113495bSYour Name 		ol_txrx_flush_rx_frames(peer, 0);
5033*5113495bSYour Name 	} else {
5034*5113495bSYour Name 		qdf_spin_unlock_bh(&peer->bufq_info.bufq_lock);
	}
5035*5113495bSYour Name 
5036*5113495bSYour Name 	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_RX_THREAD);
5037*5113495bSYour Name 
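	/*
	 * Deliver the frames one at a time: unlink each nbuf from the chain
	 * before handing it to the OSIF rx callback, and free any frame the
	 * callback rejects, counting it as dropped_err.
	 */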
5038*5113495bSYour Name 	buf = buf_list;
5039*5113495bSYour Name 	while (buf) {
5040*5113495bSYour Name 		next_buf = qdf_nbuf_queue_next(buf);
5041*5113495bSYour Name 		qdf_nbuf_set_next(buf, NULL);   /* Add NULL terminator */
5042*5113495bSYour Name 		ret = data_rx(osif_dev, buf);
5043*5113495bSYour Name 		if (ret != QDF_STATUS_SUCCESS) {
5044*5113495bSYour Name 			ol_txrx_err("Frame Rx to HDD failed");
5045*5113495bSYour Name 			TXRX_STATS_MSDU_INCR(pdev, rx.dropped_err, buf);
5047*5113495bSYour Name 			qdf_nbuf_free(buf);
5048*5113495bSYour Name 		}
5049*5113495bSYour Name 		buf = next_buf;
5050*5113495bSYour Name 	}
5051*5113495bSYour Name 	return;
5052*5113495bSYour Name 
5053*5113495bSYour Name free_buf:
5054*5113495bSYour Name 	drop_count = ol_txrx_drop_nbuf_list(buf_list);
5055*5113495bSYour Name 	ol_txrx_warn("Dropped frames %u", drop_count);
5056*5113495bSYour Name }
5057*5113495bSYour Name 
5058*5113495bSYour Name /**
5059*5113495bSYour Name  * ol_rx_data_cb() - data rx callback
5060*5113495bSYour Name  * @context: dev handle
5061*5113495bSYour Name  * @buf_list: buffer list
5062*5113495bSYour Name  * @staid: Station id
5063*5113495bSYour Name  *
5064*5113495bSYour Name  * Return: None
5065*5113495bSYour Name  */
5066*5113495bSYour Name static inline void
5067*5113495bSYour Name ol_rx_data_cb(void *context, qdf_nbuf_t buf_list, uint16_t staid)
5068*5113495bSYour Name {
5069*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = context;
5070*5113495bSYour Name 
5071*5113495bSYour Name 	ol_rx_data_handler(pdev, buf_list, staid);
5072*5113495bSYour Name }
5073*5113495bSYour Name 
5074*5113495bSYour Name /* print for every 16th packet */
5075*5113495bSYour Name #define OL_TXRX_PRINT_RATE_LIMIT_THRESH 0x0f
5076*5113495bSYour Name struct ol_rx_cached_buf *cache_buf;
5077*5113495bSYour Name 
5078*5113495bSYour Name /** helper function to drop packets
5079*5113495bSYour Name  *  Note: caller must hold the cached buq lock before invoking
5080*5113495bSYour Name  *  this function. Also, it assumes that the pointers passed in
5081*5113495bSYour Name  *  are valid (non-NULL)
5082*5113495bSYour Name  */
5083*5113495bSYour Name static inline void ol_txrx_drop_frames(
5084*5113495bSYour Name 					struct ol_txrx_cached_bufq_t *bufqi,
5085*5113495bSYour Name 					qdf_nbuf_t rx_buf_list)
5086*5113495bSYour Name {
5087*5113495bSYour Name 	uint32_t dropped = ol_txrx_drop_nbuf_list(rx_buf_list);
5088*5113495bSYour Name 
5089*5113495bSYour Name 	bufqi->dropped += dropped;
5090*5113495bSYour Name 	bufqi->qdepth_no_thresh += dropped;
5091*5113495bSYour Name 
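	/*
	 * Track the deepest overflow seen since the stats were last cleared;
	 * ol_txrx_disp_peer_cached_bufq_stats() reports it as the high water
	 * mark ("hwm").
	 */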
5092*5113495bSYour Name 	if (bufqi->qdepth_no_thresh > bufqi->high_water_mark)
5093*5113495bSYour Name 		bufqi->high_water_mark = bufqi->qdepth_no_thresh;
5094*5113495bSYour Name }
5095*5113495bSYour Name 
5096*5113495bSYour Name static QDF_STATUS ol_txrx_enqueue_rx_frames(
5097*5113495bSYour Name 					struct ol_txrx_peer_t *peer,
5098*5113495bSYour Name 					struct ol_txrx_cached_bufq_t *bufqi,
5099*5113495bSYour Name 					qdf_nbuf_t rx_buf_list)
5100*5113495bSYour Name {
5101*5113495bSYour Name 	struct ol_rx_cached_buf *cache_buf;
5102*5113495bSYour Name 	qdf_nbuf_t buf, next_buf;
5103*5113495bSYour Name 	static uint32_t count;
5104*5113495bSYour Name 
5105*5113495bSYour Name 	if ((count++ & OL_TXRX_PRINT_RATE_LIMIT_THRESH) == 0)
5106*5113495bSYour Name 		ol_txrx_info_high(
5107*5113495bSYour Name 		   "Data on the peer before it is registered bufq->curr %d bufq->drops %d",
5108*5113495bSYour Name 		   bufqi->curr, bufqi->dropped);
5109*5113495bSYour Name 
5110*5113495bSYour Name 	qdf_spin_lock_bh(&bufqi->bufq_lock);
5111*5113495bSYour Name 	if (bufqi->curr >= bufqi->thresh) {
5112*5113495bSYour Name 		ol_txrx_drop_frames(bufqi, rx_buf_list);
5113*5113495bSYour Name 		qdf_spin_unlock_bh(&bufqi->bufq_lock);
5114*5113495bSYour Name 		return QDF_STATUS_E_FAULT;
5115*5113495bSYour Name 	}
5116*5113495bSYour Name 	qdf_spin_unlock_bh(&bufqi->bufq_lock);
5117*5113495bSYour Name 
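	/*
	 * Wrap each frame in an ol_rx_cached_buf node and append it to the
	 * peer's cached queue under bufq_lock.  If the peer turns out to be
	 * invalid part-way through, the rest of the chain is dropped.
	 */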
5118*5113495bSYour Name 	buf = rx_buf_list;
5119*5113495bSYour Name 	while (buf) {
5120*5113495bSYour Name 		QDF_NBUF_CB_RX_PEER_CACHED_FRM(buf) = 1;
5121*5113495bSYour Name 		next_buf = qdf_nbuf_queue_next(buf);
5122*5113495bSYour Name 		cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
5123*5113495bSYour Name 		if (!cache_buf) {
5124*5113495bSYour Name 			qdf_nbuf_free(buf);
5125*5113495bSYour Name 		} else {
5126*5113495bSYour Name 			/* Add NULL terminator */
5127*5113495bSYour Name 			qdf_nbuf_set_next(buf, NULL);
5128*5113495bSYour Name 			cache_buf->buf = buf;
5129*5113495bSYour Name 			if (peer && peer->valid) {
5130*5113495bSYour Name 				qdf_spin_lock_bh(&bufqi->bufq_lock);
5131*5113495bSYour Name 				list_add_tail(&cache_buf->list,
5132*5113495bSYour Name 					      &bufqi->cached_bufq);
5133*5113495bSYour Name 				bufqi->curr++;
5134*5113495bSYour Name 				qdf_spin_unlock_bh(&bufqi->bufq_lock);
5135*5113495bSYour Name 			} else {
5136*5113495bSYour Name 				qdf_mem_free(cache_buf);
5137*5113495bSYour Name 				rx_buf_list = buf;
5138*5113495bSYour Name 				qdf_nbuf_set_next(rx_buf_list, next_buf);
5139*5113495bSYour Name 				qdf_spin_lock_bh(&bufqi->bufq_lock);
5140*5113495bSYour Name 				ol_txrx_drop_frames(bufqi, rx_buf_list);
5141*5113495bSYour Name 				qdf_spin_unlock_bh(&bufqi->bufq_lock);
5142*5113495bSYour Name 				return QDF_STATUS_E_FAULT;
5143*5113495bSYour Name 			}
5144*5113495bSYour Name 		}
5145*5113495bSYour Name 		buf = next_buf;
5146*5113495bSYour Name 	}
5147*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5148*5113495bSYour Name }
5149*5113495bSYour Name /**
5150*5113495bSYour Name  * ol_rx_data_process() - process rx frame
5151*5113495bSYour Name  * @peer: peer
5152*5113495bSYour Name  * @rx_buf_list: rx buffer list
5153*5113495bSYour Name  *
5154*5113495bSYour Name  * Return: None
5155*5113495bSYour Name  */
5156*5113495bSYour Name void ol_rx_data_process(struct ol_txrx_peer_t *peer,
5157*5113495bSYour Name 			qdf_nbuf_t rx_buf_list)
5158*5113495bSYour Name {
5159*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5160*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5161*5113495bSYour Name 	/*
5162*5113495bSYour Name 	 * The firmware data path active response arrives as a T2H message
5163*5113495bSYour Name 	 * in SIRQ context via the shim RX thread; IPA kernel module APIs
5164*5113495bSYour Name 	 * must not be called from SIRQ context.
5165*5113495bSYour Name 	 */
5166*5113495bSYour Name 	ol_txrx_rx_fp data_rx = NULL;
5167*5113495bSYour Name 
5168*5113495bSYour Name 	if (qdf_unlikely(!soc))
5169*5113495bSYour Name 		goto drop_rx_buf;
5170*5113495bSYour Name 
5171*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5172*5113495bSYour Name 	if (!peer || !pdev) {
5173*5113495bSYour Name 		ol_txrx_err("peer/pdev is NULL");
5174*5113495bSYour Name 		goto drop_rx_buf;
5175*5113495bSYour Name 	}
5176*5113495bSYour Name 
5177*5113495bSYour Name 	qdf_assert(peer->vdev);
5178*5113495bSYour Name 
5179*5113495bSYour Name 	qdf_spin_lock_bh(&peer->peer_info_lock);
5180*5113495bSYour Name 	if (peer->state >= OL_TXRX_PEER_STATE_CONN)
5181*5113495bSYour Name 		data_rx = peer->vdev->rx;
5182*5113495bSYour Name 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5183*5113495bSYour Name 
5184*5113495bSYour Name 	/*
5185*5113495bSYour Name 	 * If there is a data frame from peer before the peer is
5186*5113495bSYour Name 	 * registered for data service, enqueue them on to pending queue
5187*5113495bSYour Name 	 * which will be flushed to HDD once that station is registered.
5188*5113495bSYour Name 	 */
5189*5113495bSYour Name 	if (!data_rx) {
5190*5113495bSYour Name 		if (ol_txrx_enqueue_rx_frames(peer, &peer->bufq_info,
5191*5113495bSYour Name 					      rx_buf_list)
5192*5113495bSYour Name 				!= QDF_STATUS_SUCCESS)
5193*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
5194*5113495bSYour Name 				  "%s: failed to enqueue rx frm to cached_bufq",
5195*5113495bSYour Name 				  __func__);
5196*5113495bSYour Name 	} else {
5197*5113495bSYour Name #ifdef WLAN_DP_LEGACY_OL_RX_THREAD
5198*5113495bSYour Name 		/*
5199*5113495bSYour Name 		 * If the kernel is SMP, schedule rx thread to
5200*5113495bSYour Name 		 * better use multicores.
5201*5113495bSYour Name 		 */
5202*5113495bSYour Name 		if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
5203*5113495bSYour Name 			ol_rx_data_handler(pdev, rx_buf_list, peer->local_id);
5204*5113495bSYour Name 		} else {
5205*5113495bSYour Name 			p_cds_sched_context sched_ctx =
5206*5113495bSYour Name 				get_cds_sched_ctxt();
5207*5113495bSYour Name 			struct cds_ol_rx_pkt *pkt;
5208*5113495bSYour Name 
5209*5113495bSYour Name 			if (unlikely(!sched_ctx))
5210*5113495bSYour Name 				goto drop_rx_buf;
5211*5113495bSYour Name 
5212*5113495bSYour Name 			pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5213*5113495bSYour Name 			if (!pkt)
5214*5113495bSYour Name 				goto drop_rx_buf;
5215*5113495bSYour Name 
5216*5113495bSYour Name 			pkt->callback = ol_rx_data_cb;
5217*5113495bSYour Name 			pkt->context = pdev;
5218*5113495bSYour Name 			pkt->Rxpkt = rx_buf_list;
5219*5113495bSYour Name 			pkt->staId = peer->local_id;
5220*5113495bSYour Name 			cds_indicate_rxpkt(sched_ctx, pkt);
5221*5113495bSYour Name 		}
5222*5113495bSYour Name #else                           /* WLAN_DP_LEGACY_OL_RX_THREAD */
5223*5113495bSYour Name 		ol_rx_data_handler(pdev, rx_buf_list, peer->local_id);
5224*5113495bSYour Name #endif /* WLAN_DP_LEGACY_OL_RX_THREAD */
5225*5113495bSYour Name 	}
5226*5113495bSYour Name 
5227*5113495bSYour Name 	return;
5228*5113495bSYour Name 
5229*5113495bSYour Name drop_rx_buf:
5230*5113495bSYour Name 	ol_txrx_drop_nbuf_list(rx_buf_list);
5231*5113495bSYour Name }
5232*5113495bSYour Name 
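/*
 * Illustrative sketch (not part of the driver, compiled out): how a
 * caller holding a peer reference might hand a chained nbuf list to
 * ol_rx_data_process(). Frames delivered before the peer is registered
 * are cached in peer->bufq_info and flushed later by
 * ol_txrx_flush_rx_frames(); the example_mac lookup is hypothetical.
 */
#if 0
static void example_deliver_rx_list(ol_txrx_pdev_handle pdev,
				    uint8_t *example_mac,
				    qdf_nbuf_t rx_list)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, example_mac, 0, 1,
						   PEER_DEBUG_ID_OL_INTERNAL);
	if (!peer)
		return;

	/* cached internally if the peer is not yet registered */
	ol_rx_data_process(peer, rx_list);
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
}
#endif
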
5233*5113495bSYour Name /**
5234*5113495bSYour Name  * ol_txrx_register_peer() - register a peer for data service
5235*5113495bSYour Name  * @sta_desc: station descriptor for the peer
5236*5113495bSYour Name  *
5237*5113495bSYour Name  * Return: QDF Status
5238*5113495bSYour Name  */
5239*5113495bSYour Name static QDF_STATUS ol_txrx_register_peer(struct ol_txrx_desc_type *sta_desc)
5240*5113495bSYour Name {
5241*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
5242*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5243*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5244*5113495bSYour Name 	union ol_txrx_peer_update_param_t param;
5245*5113495bSYour Name 	struct privacy_exemption privacy_filter;
5246*5113495bSYour Name 
5247*5113495bSYour Name 	if (!soc) {
5248*5113495bSYour Name 		ol_txrx_err("Soc is NULL");
5249*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
5250*5113495bSYour Name 	}
5251*5113495bSYour Name 
5252*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5253*5113495bSYour Name 
5254*5113495bSYour Name 	if (!pdev) {
5255*5113495bSYour Name 		ol_txrx_err("Pdev is NULL");
5256*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
5257*5113495bSYour Name 	}
5258*5113495bSYour Name 
5259*5113495bSYour Name 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
5260*5113495bSYour Name 					 sta_desc->peer_addr.bytes);
5261*5113495bSYour Name 
5262*5113495bSYour Name 	if (!peer)
5263*5113495bSYour Name 		return QDF_STATUS_E_FAULT;
5264*5113495bSYour Name 
5265*5113495bSYour Name 	qdf_spin_lock_bh(&peer->peer_info_lock);
5266*5113495bSYour Name 	peer->state = OL_TXRX_PEER_STATE_CONN;
5267*5113495bSYour Name 	qdf_spin_unlock_bh(&peer->peer_info_lock);
5268*5113495bSYour Name 
5269*5113495bSYour Name 	param.qos_capable = sta_desc->is_qos_enabled;
5270*5113495bSYour Name 	ol_txrx_peer_update(peer->vdev, peer->mac_addr.raw, &param,
5271*5113495bSYour Name 			    ol_txrx_peer_update_qos_capable);
5272*5113495bSYour Name 
5273*5113495bSYour Name 	if (sta_desc->is_wapi_supported) {
5274*5113495bSYour Name 		/* Privacy filter to accept unencrypted WAI frames */
5275*5113495bSYour Name 		privacy_filter.ether_type = ETHERTYPE_WAI;
5276*5113495bSYour Name 		privacy_filter.filter_type = PRIVACY_FILTER_ALWAYS;
5277*5113495bSYour Name 		privacy_filter.packet_type = PRIVACY_FILTER_PACKET_BOTH;
5278*5113495bSYour Name 		ol_txrx_set_privacy_filters(peer->vdev, &privacy_filter, 1);
5279*5113495bSYour Name 	}
5280*5113495bSYour Name 
5281*5113495bSYour Name 	ol_txrx_flush_rx_frames(peer, 0);
5282*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5283*5113495bSYour Name }
5284*5113495bSYour Name 
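/*
 * Illustrative sketch (compiled out): how HDD-side code might populate
 * an ol_txrx_desc_type and register a STA peer. The mac address used
 * here is a placeholder.
 */
#if 0
static QDF_STATUS example_register_sta(void)
{
	struct ol_txrx_desc_type sta_desc = {0};
	uint8_t mac[QDF_MAC_ADDR_SIZE] = {0x00, 0x11, 0x22,
					  0x33, 0x44, 0x55};

	qdf_mem_copy(sta_desc.peer_addr.bytes, mac, QDF_MAC_ADDR_SIZE);
	sta_desc.is_qos_enabled = true;
	sta_desc.is_wapi_supported = false;

	/* moves the peer to CONN state and flushes any cached rx frames */
	return ol_txrx_register_peer(&sta_desc);
}
#endif
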
5285*5113495bSYour Name /**
5286*5113495bSYour Name  * ol_txrx_register_ocb_peer - Function to register the OCB peer
5287*5113495bSYour Name  * @mac_addr: MAC address of the self peer
5288*5113495bSYour Name  *
5289*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE on failure
5290*5113495bSYour Name  */
5291*5113495bSYour Name static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr)
5292*5113495bSYour Name {
5293*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5294*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5295*5113495bSYour Name 	ol_txrx_peer_handle peer;
5296*5113495bSYour Name 
5297*5113495bSYour Name 	if (!soc) {
5298*5113495bSYour Name 		ol_txrx_err("Unable to find soc!");
5299*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
5300*5113495bSYour Name 	}
5301*5113495bSYour Name 
5302*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5303*5113495bSYour Name 
5304*5113495bSYour Name 	if (!pdev) {
5305*5113495bSYour Name 		ol_txrx_err("Unable to find pdev!");
5306*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
5307*5113495bSYour Name 	}
5308*5113495bSYour Name 
5309*5113495bSYour Name 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
5310*5113495bSYour Name 					 mac_addr);
5311*5113495bSYour Name 	if (!peer) {
5312*5113495bSYour Name 		ol_txrx_err("Unable to find OCB peer!");
5313*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
5314*5113495bSYour Name 	}
5315*5113495bSYour Name 
5316*5113495bSYour Name 	ol_txrx_set_ocb_peer(pdev, peer);
5317*5113495bSYour Name 
5318*5113495bSYour Name 	/* Set the peer state to authenticated */
5319*5113495bSYour Name 	ol_txrx_peer_state_update((struct cdp_soc_t *)soc, peer->mac_addr.raw,
5320*5113495bSYour Name 				  OL_TXRX_PEER_STATE_AUTH);
5321*5113495bSYour Name 
5322*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5323*5113495bSYour Name }
5324*5113495bSYour Name 
5325*5113495bSYour Name /**
5326*5113495bSYour Name  * ol_txrx_set_ocb_peer - Function to store the OCB peer
5327*5113495bSYour Name  * @pdev: Handle to the HTT instance
5328*5113495bSYour Name  * @peer: Pointer to the peer
5329*5113495bSYour Name  */
5330*5113495bSYour Name void ol_txrx_set_ocb_peer(struct ol_txrx_pdev_t *pdev,
5331*5113495bSYour Name 			  struct ol_txrx_peer_t *peer)
5332*5113495bSYour Name {
5333*5113495bSYour Name 	if (!pdev)
5334*5113495bSYour Name 		return;
5335*5113495bSYour Name 
5336*5113495bSYour Name 	pdev->ocb_peer = peer;
5337*5113495bSYour Name 	pdev->ocb_peer_valid = (NULL != peer);
5338*5113495bSYour Name }
5339*5113495bSYour Name 
5340*5113495bSYour Name /**
5341*5113495bSYour Name  * ol_txrx_get_ocb_peer - Function to retrieve the OCB peer
5342*5113495bSYour Name  * @pdev: Handle to the HTT instance
5343*5113495bSYour Name  * @peer: Pointer to the returned peer
5344*5113495bSYour Name  *
5345*5113495bSYour Name  * Return: true if the peer is valid, false if not
5346*5113495bSYour Name  */
5347*5113495bSYour Name bool ol_txrx_get_ocb_peer(struct ol_txrx_pdev_t *pdev,
5348*5113495bSYour Name 			  struct ol_txrx_peer_t **peer)
5349*5113495bSYour Name {
5350*5113495bSYour Name 	int rc;
5351*5113495bSYour Name 
5352*5113495bSYour Name 	if ((!pdev) || (!peer)) {
5353*5113495bSYour Name 		rc = false;
5354*5113495bSYour Name 		goto exit;
5355*5113495bSYour Name 	}
5356*5113495bSYour Name 
5357*5113495bSYour Name 	if (pdev->ocb_peer_valid) {
5358*5113495bSYour Name 		*peer = pdev->ocb_peer;
5359*5113495bSYour Name 		rc = true;
5360*5113495bSYour Name 	} else {
5361*5113495bSYour Name 		rc = false;
5362*5113495bSYour Name 	}
5363*5113495bSYour Name 
5364*5113495bSYour Name exit:
5365*5113495bSYour Name 	return rc;
5366*5113495bSYour Name }
5367*5113495bSYour Name 
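/*
 * Illustrative sketch (compiled out): the expected pairing of
 * ol_txrx_set_ocb_peer() and ol_txrx_get_ocb_peer(). Registration
 * stores the self peer; consumers re-validate it on each use.
 */
#if 0
static void example_use_ocb_peer(struct ol_txrx_pdev_t *pdev)
{
	struct ol_txrx_peer_t *ocb_peer = NULL;

	if (!ol_txrx_get_ocb_peer(pdev, &ocb_peer))
		return;		/* no valid OCB peer stored */

	/* ... use ocb_peer ... */

	/* clearing the stored peer also clears the valid flag */
	ol_txrx_set_ocb_peer(pdev, NULL);
}
#endif
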
5368*5113495bSYour Name /**
5369*5113495bSYour Name  * ol_txrx_register_pause_cb() - register pause callback
5370*5113495bSYour Name  * @soc_hdl: Datapath soc handle
5371*5113495bSYour Name  * @pause_cb: pause callback
5372*5113495bSYour Name  *
5373*5113495bSYour Name  * Return: QDF status
5374*5113495bSYour Name  */
5375*5113495bSYour Name static QDF_STATUS ol_txrx_register_pause_cb(struct cdp_soc_t *soc_hdl,
5376*5113495bSYour Name 					    tx_pause_callback pause_cb)
5377*5113495bSYour Name {
5378*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5379*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5380*5113495bSYour Name 
5381*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5382*5113495bSYour Name 	if (!pdev || !pause_cb) {
5383*5113495bSYour Name 		ol_txrx_err("pdev or pause_cb is NULL");
5384*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
5385*5113495bSYour Name 	}
5386*5113495bSYour Name 	pdev->pause_cb = pause_cb;
5387*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5388*5113495bSYour Name }
5389*5113495bSYour Name 
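/*
 * Illustrative sketch (compiled out): registering a tx pause callback.
 * The callback prototype below assumes the cdp tx_pause_callback
 * signature (vdev_id, netif action, netif reason); check the cdp
 * headers for the authoritative typedef.
 */
#if 0
static void example_pause_cb(uint8_t vdev_id,
			     enum netif_action_type action,
			     enum netif_reason_type reason)
{
	/* pause/unpause the OS netdev queues for this vdev */
}

static QDF_STATUS example_register_pause(struct cdp_soc_t *soc_hdl)
{
	return ol_txrx_register_pause_cb(soc_hdl, example_pause_cb);
}
#endif
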
5390*5113495bSYour Name #ifdef RECEIVE_OFFLOAD
5391*5113495bSYour Name /**
5392*5113495bSYour Name  * ol_txrx_offld_flush_handler() - offld flush handler
5393*5113495bSYour Name  * @context: dev handle
5394*5113495bSYour Name  * @rxpkt: rx data
5395*5113495bSYour Name  * @staid: station id
5396*5113495bSYour Name  *
5397*5113495bSYour Name  * This function handles an offld flush indication.
5398*5113495bSYour Name  * If the rx thread is enabled, it will be invoked by the rx
5399*5113495bSYour Name  * thread else it will be called in the tasklet context
5400*5113495bSYour Name  *
5401*5113495bSYour Name  * Return: none
5402*5113495bSYour Name  */
5403*5113495bSYour Name static void ol_txrx_offld_flush_handler(void *context,
5404*5113495bSYour Name 					qdf_nbuf_t rxpkt,
5405*5113495bSYour Name 					uint16_t staid)
5406*5113495bSYour Name {
5407*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5408*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5409*5113495bSYour Name 
5410*5113495bSYour Name 	if (qdf_unlikely(!soc)) {
5411*5113495bSYour Name 		qdf_assert(0);
5412*5113495bSYour Name 		return;
5413*5113495bSYour Name 	}
5414*5113495bSYour Name 
5415*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5416*5113495bSYour Name 	if (qdf_unlikely(!pdev)) {
5417*5113495bSYour Name 		ol_txrx_err("Invalid pdev context");
5418*5113495bSYour Name 		qdf_assert(0);
5419*5113495bSYour Name 		return;
5420*5113495bSYour Name 	}
5421*5113495bSYour Name 
5422*5113495bSYour Name 	if (pdev->offld_flush_cb)
5423*5113495bSYour Name 		pdev->offld_flush_cb(context);
5424*5113495bSYour Name 	else
5425*5113495bSYour Name 		ol_txrx_err("offld_flush_cb NULL");
5426*5113495bSYour Name }
5427*5113495bSYour Name 
5428*5113495bSYour Name /**
5429*5113495bSYour Name  * ol_txrx_offld_flush() - offld flush callback
5430*5113495bSYour Name  * @data: opaque data pointer
5431*5113495bSYour Name  *
5432*5113495bSYour Name  * This is the callback registered with CE to trigger
5433*5113495bSYour Name  * an offld flush
5434*5113495bSYour Name  *
5435*5113495bSYour Name  * Return: none
5436*5113495bSYour Name  */
5437*5113495bSYour Name static void ol_txrx_offld_flush(void *data)
5438*5113495bSYour Name {
5439*5113495bSYour Name 	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
5440*5113495bSYour Name 	struct cds_ol_rx_pkt *pkt;
5441*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5442*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5443*5113495bSYour Name 
5444*5113495bSYour Name 	if (qdf_unlikely(!sched_ctx))
5445*5113495bSYour Name 		return;
5446*5113495bSYour Name 
5447*5113495bSYour Name 	if (qdf_unlikely(!soc))
5448*5113495bSYour Name 		return;
5449*5113495bSYour Name 
5450*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5451*5113495bSYour Name 	if (qdf_unlikely(!pdev)) {
5452*5113495bSYour Name 		ol_txrx_err("TXRX module context is NULL");
5453*5113495bSYour Name 		return;
5454*5113495bSYour Name 	}
5455*5113495bSYour Name 
5456*5113495bSYour Name 	if (!ol_cfg_is_rx_thread_enabled(pdev->ctrl_pdev)) {
5457*5113495bSYour Name 		ol_txrx_offld_flush_handler(data, NULL, 0);
5458*5113495bSYour Name 	} else {
5459*5113495bSYour Name 		pkt = cds_alloc_ol_rx_pkt(sched_ctx);
5460*5113495bSYour Name 		if (qdf_unlikely(!pkt))
5461*5113495bSYour Name 			return;
5462*5113495bSYour Name 
5463*5113495bSYour Name 		pkt->callback = ol_txrx_offld_flush_handler;
5464*5113495bSYour Name 		pkt->context = data;
5465*5113495bSYour Name 		pkt->Rxpkt = NULL;
5466*5113495bSYour Name 		pkt->staId = 0;
5467*5113495bSYour Name 		cds_indicate_rxpkt(sched_ctx, pkt);
5468*5113495bSYour Name 	}
5469*5113495bSYour Name }
5470*5113495bSYour Name 
5471*5113495bSYour Name /**
5472*5113495bSYour Name  * ol_register_offld_flush_cb() - register the offld flush callback
5473*5113495bSYour Name  * @offld_flush_cb: flush callback function
5473*5113495bSYour Name  * @offld_flush_cb: flush callback function
5475*5113495bSYour Name  *
5476*5113495bSYour Name  * Store the offld flush callback provided and in turn
5477*5113495bSYour Name  * register OL's offld flush handler with CE
5478*5113495bSYour Name  *
5479*5113495bSYour Name  * Return: none
5480*5113495bSYour Name  */
5481*5113495bSYour Name static void ol_register_offld_flush_cb(void (offld_flush_cb)(void *))
5482*5113495bSYour Name {
5483*5113495bSYour Name 	struct hif_opaque_softc *hif_device;
5484*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5485*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5486*5113495bSYour Name 
5487*5113495bSYour Name 	if (qdf_unlikely(!soc)) {
5488*5113495bSYour Name 		TXRX_ASSERT2(0);
5489*5113495bSYour Name 		goto out;
5490*5113495bSYour Name 	}
5491*5113495bSYour Name 
5492*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5493*5113495bSYour Name 	if (!pdev) {
5494*5113495bSYour Name 		ol_txrx_err("pdev NULL!");
5495*5113495bSYour Name 		TXRX_ASSERT2(0);
5496*5113495bSYour Name 		goto out;
5497*5113495bSYour Name 	}
5498*5113495bSYour Name 	if (pdev->offld_flush_cb) {
5499*5113495bSYour Name 		ol_txrx_info("offld already initialised");
5500*5113495bSYour Name 		if (pdev->offld_flush_cb != offld_flush_cb) {
5501*5113495bSYour Name 			ol_txrx_err(
5502*5113495bSYour Name 				   "offld_flush_cb differs from the previously registered callback");
5503*5113495bSYour Name 			TXRX_ASSERT2(0);
5504*5113495bSYour Name 			goto out;
5505*5113495bSYour Name 		}
5506*5113495bSYour Name 		goto out;
5507*5113495bSYour Name 	}
5508*5113495bSYour Name 	pdev->offld_flush_cb = offld_flush_cb;
5509*5113495bSYour Name 	hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5510*5113495bSYour Name 
5511*5113495bSYour Name 	if (qdf_unlikely(!hif_device)) {
5512*5113495bSYour Name 		qdf_assert(0);
5513*5113495bSYour Name 		goto out;
5514*5113495bSYour Name 	}
5515*5113495bSYour Name 
5516*5113495bSYour Name 	hif_offld_flush_cb_register(hif_device, ol_txrx_offld_flush);
5517*5113495bSYour Name 
5518*5113495bSYour Name out:
5519*5113495bSYour Name 	return;
5520*5113495bSYour Name }
5521*5113495bSYour Name 
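/*
 * Illustrative sketch (compiled out): a minimal offld (LRO/GRO) flush
 * callback and its registration. Re-registering the same callback is
 * a no-op; registering a different one trips TXRX_ASSERT2 above.
 */
#if 0
static void example_offld_flush(void *data)
{
	/* flush any aggregated rx state held for this device */
}

static void example_setup_offld(void)
{
	ol_register_offld_flush_cb(example_offld_flush);
	/* ... datapath runs ... */
	ol_deregister_offld_flush_cb();
}
#endif
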
5522*5113495bSYour Name /**
5523*5113495bSYour Name  * ol_deregister_offld_flush_cb() - deregister the offld flush callback
5524*5113495bSYour Name  *
5525*5113495bSYour Name  * Remove the offld flush callback provided and in turn
5526*5113495bSYour Name  * deregister OL's offld flush handler with CE
5527*5113495bSYour Name  *
5528*5113495bSYour Name  * Return: none
5529*5113495bSYour Name  */
5530*5113495bSYour Name static void ol_deregister_offld_flush_cb(void)
5531*5113495bSYour Name {
5532*5113495bSYour Name 	struct hif_opaque_softc *hif_device;
5533*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5534*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5535*5113495bSYour Name 
5536*5113495bSYour Name 	if (qdf_unlikely(!soc))
5537*5113495bSYour Name 		return;
5538*5113495bSYour Name 
5539*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5540*5113495bSYour Name 	if (!pdev) {
5541*5113495bSYour Name 		ol_txrx_err("pdev NULL!");
5542*5113495bSYour Name 		return;
5543*5113495bSYour Name 	}
5544*5113495bSYour Name 	hif_device = cds_get_context(QDF_MODULE_ID_HIF);
5545*5113495bSYour Name 
5546*5113495bSYour Name 	if (qdf_unlikely(!hif_device)) {
5547*5113495bSYour Name 		qdf_assert(0);
5548*5113495bSYour Name 		return;
5549*5113495bSYour Name 	}
5550*5113495bSYour Name 
5551*5113495bSYour Name 	hif_offld_flush_cb_deregister(hif_device);
5552*5113495bSYour Name 
5553*5113495bSYour Name 	pdev->offld_flush_cb = NULL;
5554*5113495bSYour Name }
5555*5113495bSYour Name #endif /* RECEIVE_OFFLOAD */
5556*5113495bSYour Name 
5557*5113495bSYour Name /**
5558*5113495bSYour Name  * ol_register_data_stall_detect_cb() - register data stall callback
5559*5113495bSYour Name  * @soc_hdl: Datapath soc handle
5560*5113495bSYour Name  * @pdev_id: id of data path pdev handle
5561*5113495bSYour Name  * @data_stall_detect_callback: data stall callback function
5562*5113495bSYour Name  *
5564*5113495bSYour Name  * Return: QDF_STATUS Enumeration
5565*5113495bSYour Name  */
5566*5113495bSYour Name static QDF_STATUS ol_register_data_stall_detect_cb(
5567*5113495bSYour Name 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5568*5113495bSYour Name 			data_stall_detect_cb data_stall_detect_callback)
5569*5113495bSYour Name {
5570*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5571*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5572*5113495bSYour Name 
5573*5113495bSYour Name 	if (!pdev) {
5574*5113495bSYour Name 		ol_txrx_err("pdev NULL!");
5575*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
5576*5113495bSYour Name 	}
5577*5113495bSYour Name 	pdev->data_stall_detect_callback = data_stall_detect_callback;
5578*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5579*5113495bSYour Name }
5580*5113495bSYour Name 
5581*5113495bSYour Name /**
5582*5113495bSYour Name  * ol_deregister_data_stall_detect_cb() - de-register data stall callback
5583*5113495bSYour Name  * @soc_hdl: Datapath soc handle
5584*5113495bSYour Name  * @pdev_id: id of data path pdev handle
5585*5113495bSYour Name  * @data_stall_detect_callback: data stall callback function
5586*5113495bSYour Name  *
5588*5113495bSYour Name  * Return: QDF_STATUS Enumeration
5589*5113495bSYour Name  */
5590*5113495bSYour Name static QDF_STATUS ol_deregister_data_stall_detect_cb(
5591*5113495bSYour Name 			struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
5592*5113495bSYour Name 			data_stall_detect_cb data_stall_detect_callback)
5593*5113495bSYour Name {
5594*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5595*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5596*5113495bSYour Name 
5597*5113495bSYour Name 	if (!pdev) {
5598*5113495bSYour Name 		ol_txrx_err("pdev NULL!");
5599*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
5600*5113495bSYour Name 	}
5601*5113495bSYour Name 	pdev->data_stall_detect_callback = NULL;
5602*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5603*5113495bSYour Name }
5604*5113495bSYour Name 
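/*
 * Illustrative sketch (compiled out): a data stall detect callback and
 * its register/deregister pairing. The callback receives the
 * data_stall_event_info filled in by ol_txrx_post_data_stall_event().
 */
#if 0
static void example_data_stall_cb(struct data_stall_event_info *info)
{
	/* forward the stall report (type, vdev bitmap, recovery hint) */
}

static void example_stall_cb_lifecycle(struct cdp_soc_t *soc_hdl)
{
	ol_register_data_stall_detect_cb(soc_hdl, OL_TXRX_PDEV_ID,
					 example_data_stall_cb);
	/* ... */
	ol_deregister_data_stall_detect_cb(soc_hdl, OL_TXRX_PDEV_ID,
					   example_data_stall_cb);
}
#endif
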
5605*5113495bSYour Name /**
5606*5113495bSYour Name  * ol_txrx_post_data_stall_event() - post data stall event
 * @soc_hdl: Datapath soc handle
5607*5113495bSYour Name  * @indicator: Module triggering data stall
5608*5113495bSYour Name  * @data_stall_type: data stall event type
5609*5113495bSYour Name  * @pdev_id: pdev id
5610*5113495bSYour Name  * @vdev_id_bitmap: vdev id bitmap
5611*5113495bSYour Name  * @recovery_type: data stall recovery type
5612*5113495bSYour Name  *
5613*5113495bSYour Name  * Return: None
5614*5113495bSYour Name  */
5615*5113495bSYour Name static void ol_txrx_post_data_stall_event(
5616*5113495bSYour Name 				struct cdp_soc_t *soc_hdl,
5617*5113495bSYour Name 				enum data_stall_log_event_indicator indicator,
5618*5113495bSYour Name 				enum data_stall_log_event_type data_stall_type,
5619*5113495bSYour Name 				uint32_t pdev_id, uint32_t vdev_id_bitmap,
5620*5113495bSYour Name 				enum data_stall_log_recovery_type recovery_type)
5621*5113495bSYour Name {
5622*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5623*5113495bSYour Name 	struct data_stall_event_info data_stall_info;
5624*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5625*5113495bSYour Name 
5626*5113495bSYour Name 	if (qdf_unlikely(!soc)) {
5627*5113495bSYour Name 		ol_txrx_err("soc is NULL");
5628*5113495bSYour Name 		return;
5629*5113495bSYour Name 	}
5630*5113495bSYour Name 
5631*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5632*5113495bSYour Name 	if (!pdev) {
5633*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5634*5113495bSYour Name 			  "%s: pdev is NULL.", __func__);
5635*5113495bSYour Name 		return;
5636*5113495bSYour Name 	}
5637*5113495bSYour Name 
5638*5113495bSYour Name 	if (!pdev->data_stall_detect_callback) {
5639*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
5640*5113495bSYour Name 			  "%s: data stall cb not registered", __func__);
5641*5113495bSYour Name 		return;
5642*5113495bSYour Name 	}
5643*5113495bSYour Name 
5644*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
5645*5113495bSYour Name 		  "%s: data_stall_type: %x pdev_id: %d",
5646*5113495bSYour Name 		  __func__, data_stall_type, pdev_id);
5647*5113495bSYour Name 
5648*5113495bSYour Name 	data_stall_info.indicator = indicator;
5649*5113495bSYour Name 	data_stall_info.data_stall_type = data_stall_type;
5650*5113495bSYour Name 	data_stall_info.vdev_id_bitmap = vdev_id_bitmap;
5651*5113495bSYour Name 	data_stall_info.pdev_id = pdev_id;
5652*5113495bSYour Name 	data_stall_info.recovery_type = recovery_type;
5653*5113495bSYour Name 
5654*5113495bSYour Name 	if (data_stall_info.data_stall_type ==
5655*5113495bSYour Name 				DATA_STALL_LOG_FW_RX_REFILL_FAILED) {
5656*5113495bSYour Name 		htt_log_rx_ring_info(pdev->htt_pdev);
5657*5113495bSYour Name 		htt_rx_refill_failure(pdev->htt_pdev);
5658*5113495bSYour Name 	}
5659*5113495bSYour Name 
5660*5113495bSYour Name 	pdev->data_stall_detect_callback(&data_stall_info);
5661*5113495bSYour Name }
5662*5113495bSYour Name 
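/*
 * Illustrative sketch (compiled out): posting a stall event for vdev 0.
 * The indicator/recovery enum values named here are assumed members of
 * the data_stall_log_* enums; DATA_STALL_LOG_FW_RX_REFILL_FAILED
 * additionally dumps rx ring state, as seen above.
 */
#if 0
static void example_report_rx_refill_stall(struct cdp_soc_t *soc_hdl)
{
	uint32_t vdev_id_bitmap = 1 << 0;	/* vdev 0 */

	ol_txrx_post_data_stall_event(soc_hdl,
				      DATA_STALL_LOG_INDICATOR_FIRMWARE,
				      DATA_STALL_LOG_FW_RX_REFILL_FAILED,
				      OL_TXRX_PDEV_ID, vdev_id_bitmap,
				      DATA_STALL_LOG_RECOVERY_TRIGGER_PDR);
}
#endif
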
5663*5113495bSYour Name void
5664*5113495bSYour Name ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
5665*5113495bSYour Name {
5666*5113495bSYour Name 	qdf_print(" Pkt: VA 0x%pK PA 0x%llx len %d\n",
5667*5113495bSYour Name 		  qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
5668*5113495bSYour Name 	print_hex_dump(KERN_DEBUG, "Pkt:   ", DUMP_PREFIX_ADDRESS, 16, 4,
5669*5113495bSYour Name 		       qdf_nbuf_data(nbuf), len, true);
5670*5113495bSYour Name }
5671*5113495bSYour Name 
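/*
 * Illustrative sketch (compiled out): dumping a packet when its
 * physical address is not at hand; a zero PA simply prints as 0x0
 * while the hex dump of the frame contents is still emitted.
 */
#if 0
static void example_dump_nbuf(qdf_nbuf_t nbuf)
{
	ol_txrx_dump_pkt(nbuf, 0, (int)qdf_nbuf_len(nbuf));
}
#endif
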
5672*5113495bSYour Name struct cdp_vdev *ol_txrx_get_vdev_from_vdev_id(uint8_t vdev_id)
5673*5113495bSYour Name {
5674*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
5675*5113495bSYour Name 	ol_txrx_vdev_handle vdev = NULL;
5676*5113495bSYour Name 
5677*5113495bSYour Name 	if (qdf_unlikely(!soc))
5678*5113495bSYour Name 		return NULL;
5679*5113495bSYour Name 
5680*5113495bSYour Name 	vdev = ol_txrx_get_vdev_from_soc_vdev_id(soc, vdev_id);
5681*5113495bSYour Name 
5682*5113495bSYour Name 	return ol_txrx_vdev_t_to_cdp_vdev(vdev);
5683*5113495bSYour Name }
5684*5113495bSYour Name 
5685*5113495bSYour Name struct ol_txrx_vdev_t *ol_txrx_get_vdev_from_soc_vdev_id(
5686*5113495bSYour Name 				struct ol_txrx_soc_t *soc, uint8_t vdev_id)
5687*5113495bSYour Name {
5688*5113495bSYour Name 	ol_txrx_pdev_handle pdev;
5689*5113495bSYour Name 	ol_txrx_vdev_handle vdev = NULL;
5690*5113495bSYour Name 
5691*5113495bSYour Name 	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
5692*5113495bSYour Name 	if (qdf_unlikely(!pdev))
5693*5113495bSYour Name 		return NULL;
5694*5113495bSYour Name 
5695*5113495bSYour Name 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
5696*5113495bSYour Name 		if (vdev->vdev_id == vdev_id)
5697*5113495bSYour Name 			break;
5698*5113495bSYour Name 	}
5699*5113495bSYour Name 
5700*5113495bSYour Name 	return vdev;
5701*5113495bSYour Name }
5702*5113495bSYour Name 
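/*
 * Illustrative sketch (compiled out): resolving a vdev by id. The list
 * walk above is unlocked, so callers are expected to run in a context
 * where the vdev list is stable.
 */
#if 0
static bool example_vdev_exists(struct ol_txrx_soc_t *soc, uint8_t vdev_id)
{
	return !!ol_txrx_get_vdev_from_soc_vdev_id(soc, vdev_id);
}
#endif
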
5703*5113495bSYour Name /**
5704*5113495bSYour Name  * ol_txrx_get_mon_vdev_from_pdev() - get monitor mode vdev from pdev
5705*5113495bSYour Name  * @soc_hdl: datapath soc handle
5706*5113495bSYour Name  * @pdev_id: the physical device id the virtual device belongs to
5707*5113495bSYour Name  *
5708*5113495bSYour Name  * Return: vdev id of the monitor vdev on success,
5709*5113495bSYour Name  *         -EINVAL if the pdev cannot be found
5710*5113495bSYour Name  */
5711*5113495bSYour Name uint8_t ol_txrx_get_mon_vdev_from_pdev(struct cdp_soc_t *soc_hdl,
5712*5113495bSYour Name 				       uint8_t pdev_id)
5713*5113495bSYour Name {
5714*5113495bSYour Name 	struct ol_txrx_soc_t *soc = (struct ol_txrx_soc_t *)soc_hdl;
5715*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5716*5113495bSYour Name 
5717*5113495bSYour Name 	if (qdf_unlikely(!pdev))
5718*5113495bSYour Name 		return -EINVAL;
5719*5113495bSYour Name 
5720*5113495bSYour Name 	return pdev->monitor_vdev->vdev_id;
5721*5113495bSYour Name }
5722*5113495bSYour Name 
5723*5113495bSYour Name /**
5724*5113495bSYour Name  * ol_txrx_set_wisa_mode() - set wisa mode
5725*5113495bSYour Name  * @soc_hdl: Datapath soc handle
5726*5113495bSYour Name  * @vdev_id: vdev_id
5727*5113495bSYour Name  * @enable: enable flag
5728*5113495bSYour Name  *
5729*5113495bSYour Name  * Return: QDF STATUS
5730*5113495bSYour Name  */
5731*5113495bSYour Name static QDF_STATUS ol_txrx_set_wisa_mode(struct cdp_soc_t *soc_hdl,
5732*5113495bSYour Name 					uint8_t vdev_id, bool enable)
5733*5113495bSYour Name {
5734*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev =
5735*5113495bSYour Name 		(struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
5736*5113495bSYour Name 
5737*5113495bSYour Name 	if (!vdev)
5738*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
5739*5113495bSYour Name 
5740*5113495bSYour Name 	vdev->is_wisa_mode_enable = enable;
5741*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5742*5113495bSYour Name }
5743*5113495bSYour Name 
5744*5113495bSYour Name /**
5745*5113495bSYour Name  * ol_txrx_get_vdev_id() - get interface id from interface context
5746*5113495bSYour Name  * @pvdev: vdev handle
5747*5113495bSYour Name  *
5748*5113495bSYour Name  * Return: virtual interface id
5749*5113495bSYour Name  */
5750*5113495bSYour Name static uint16_t ol_txrx_get_vdev_id(struct cdp_vdev *pvdev)
5751*5113495bSYour Name {
5752*5113495bSYour Name 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
5753*5113495bSYour Name 
5754*5113495bSYour Name 	return vdev->vdev_id;
5755*5113495bSYour Name }
5756*5113495bSYour Name 
5757*5113495bSYour Name /**
5758*5113495bSYour Name  * ol_txrx_soc_attach_target() - attach soc target
5759*5113495bSYour Name  * @soc: soc handle
5760*5113495bSYour Name  *
5761*5113495bSYour Name  * MCL legacy OL does nothing here
5762*5113495bSYour Name  *
5763*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS
5764*5113495bSYour Name  */
5765*5113495bSYour Name static QDF_STATUS ol_txrx_soc_attach_target(ol_txrx_soc_handle soc)
5766*5113495bSYour Name {
5767*5113495bSYour Name 	/* MCL legacy OL does nothing here */
5768*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
5769*5113495bSYour Name }
5770*5113495bSYour Name 
5771*5113495bSYour Name /**
5772*5113495bSYour Name  * ol_txrx_soc_detach() - detach soc target
5773*5113495bSYour Name  * @soc: soc handle
5774*5113495bSYour Name  *
5775*5113495bSYour Name  * Frees the soc handle memory.
5776*5113495bSYour Name  *
5777*5113495bSYour Name  * Return: none
5778*5113495bSYour Name  */
5779*5113495bSYour Name static void ol_txrx_soc_detach(struct cdp_soc_t *soc)
5780*5113495bSYour Name {
5781*5113495bSYour Name 	qdf_mem_free(soc);
5782*5113495bSYour Name }
5783*5113495bSYour Name 
5784*5113495bSYour Name #ifdef REMOVE_PKT_LOG
5785*5113495bSYour Name /**
5786*5113495bSYour Name  * ol_txrx_pkt_log_con_service() - connect packet log service
5787*5113495bSYour Name  * @soc_hdl: Datapath soc handle
5788*5113495bSYour Name  * @pdev_id: id of data path pdev handle
5789*5113495bSYour Name  * @scn: device context
5790*5113495bSYour Name  *
5791*5113495bSYour Name  * Return: none
5792*5113495bSYour Name  */
5793*5113495bSYour Name static void ol_txrx_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
5794*5113495bSYour Name 					uint8_t pdev_id, void *scn)
5795*5113495bSYour Name {
5796*5113495bSYour Name }
5797*5113495bSYour Name 
5798*5113495bSYour Name /**
5799*5113495bSYour Name  * ol_txrx_pkt_log_exit() - cleanup packet log info
5800*5113495bSYour Name  * @soc_hdl: Datapath soc handle
5801*5113495bSYour Name  * @pdev_id: id of data path pdev handle
5802*5113495bSYour Name  *
5803*5113495bSYour Name  * Return: none
5804*5113495bSYour Name  */
5805*5113495bSYour Name static void ol_txrx_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
5806*5113495bSYour Name {
5807*5113495bSYour Name }
5808*5113495bSYour Name 
5809*5113495bSYour Name #else
5810*5113495bSYour Name static void ol_txrx_pkt_log_con_service(struct cdp_soc_t *soc_hdl,
5811*5113495bSYour Name 					uint8_t pdev_id, void *scn)
5812*5113495bSYour Name {
5813*5113495bSYour Name 	htt_pkt_log_init(soc_hdl, pdev_id, scn);
5814*5113495bSYour Name 	pktlog_htc_attach();
5815*5113495bSYour Name }
5816*5113495bSYour Name 
5817*5113495bSYour Name static void ol_txrx_pkt_log_exit(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
5818*5113495bSYour Name {
5819*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5820*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5821*5113495bSYour Name 
5822*5113495bSYour Name 	if (!pdev) {
5823*5113495bSYour Name 		ol_txrx_err("pdev handle is NULL");
5824*5113495bSYour Name 		return;
5825*5113495bSYour Name 	}
5826*5113495bSYour Name 
5827*5113495bSYour Name 	htt_pktlogmod_exit(pdev);
5828*5113495bSYour Name }
5829*5113495bSYour Name #endif
5830*5113495bSYour Name 
5831*5113495bSYour Name /* OL wrapper functions for CDP abstraction */
5832*5113495bSYour Name /**
5833*5113495bSYour Name  * ol_txrx_wrapper_flush_rx_frames() - flush rx frames on the queue
5834*5113495bSYour Name  * @soc_hdl: data path soc handle
5835*5113495bSYour Name  * @pdev_id: datapath pdev identifier
5836*5113495bSYour Name  * @peer_mac: peer mac address
5837*5113495bSYour Name  * @drop: true to drop the cached rx packets, false to deliver them
5838*5113495bSYour Name  *
5839*5113495bSYour Name  * Return: none
5840*5113495bSYour Name  */
5841*5113495bSYour Name static void ol_txrx_wrapper_flush_rx_frames(struct cdp_soc_t *soc_hdl,
5842*5113495bSYour Name 					    uint8_t pdev_id, void *peer_mac,
5843*5113495bSYour Name 					    bool drop)
5844*5113495bSYour Name {
5845*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5846*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
5847*5113495bSYour Name 	struct ol_txrx_peer_t *peer;
5848*5113495bSYour Name 
5849*5113495bSYour Name 	if (!pdev) {
5850*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
5851*5113495bSYour Name 		return;
5852*5113495bSYour Name 	}
5853*5113495bSYour Name 
5854*5113495bSYour Name 	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac, 0, 1,
5855*5113495bSYour Name 						   PEER_DEBUG_ID_OL_INTERNAL);
5856*5113495bSYour Name 	if (!peer) {
5857*5113495bSYour Name 		ol_txrx_err("peer "QDF_MAC_ADDR_FMT" not found",
5858*5113495bSYour Name 			    QDF_MAC_ADDR_REF(peer_mac));
5859*5113495bSYour Name 		return;
5860*5113495bSYour Name 	}
5861*5113495bSYour Name 
5862*5113495bSYour Name 	ol_txrx_flush_rx_frames(peer, drop);
	/* release the reference taken by ol_txrx_peer_find_hash_find_get_ref */
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
5863*5113495bSYour Name }
5864*5113495bSYour Name 
5865*5113495bSYour Name /**
5866*5113495bSYour Name  * ol_txrx_wrapper_register_peer() - register peer
5867*5113495bSYour Name  * @pdev: pdev handle
5868*5113495bSYour Name  * @soc_hdl: datapath soc handle
 * @pdev_id: id of data path pdev handle
5869*5113495bSYour Name  * @sta_desc: peer description
5870*5113495bSYour Name  * Return: QDF STATUS
5871*5113495bSYour Name  */
5872*5113495bSYour Name static QDF_STATUS ol_txrx_wrapper_register_peer(
5873*5113495bSYour Name 					struct cdp_soc_t *soc_hdl,
5874*5113495bSYour Name 					uint8_t pdev_id,
5875*5113495bSYour Name 					struct ol_txrx_desc_type *sta_desc)
5876*5113495bSYour Name {
5877*5113495bSYour Name 	return ol_txrx_register_peer(sta_desc);
5878*5113495bSYour Name }
5879*5113495bSYour Name 
5880*5113495bSYour Name /**
5881*5113495bSYour Name  * ol_txrx_wrapper_cfg_is_high_latency() - device is high or low latency device
5882*5113495bSYour Name  * @pdev: pdev handle
5883*5113495bSYour Name  * @cfg_pdev: configuration pdev handle
5884*5113495bSYour Name  * Return: 1 high latency bus
5885*5113495bSYour Name  *         0 low latency bus
5886*5113495bSYour Name  */
5887*5113495bSYour Name static int ol_txrx_wrapper_cfg_is_high_latency(struct cdp_cfg *cfg_pdev)
5888*5113495bSYour Name {
5889*5113495bSYour Name 	return ol_cfg_is_high_latency(cfg_pdev);
5890*5113495bSYour Name }
5891*5113495bSYour Name 
5892*5113495bSYour Name /**
5893*5113495bSYour Name  * ol_txrx_wrapper_peer_state_update() - specify the peer's authentication state
5894*5113495bSYour Name  * @soc_hdl: datapath soc handle
5895*5113495bSYour Name  * @peer_mac: mac address of which peer has changed its state
5896*5113495bSYour Name  * @state: the new state of the peer
5897*5113495bSYour Name  *
5898*5113495bSYour Name  *  Specify the peer's authentication state (none, connected, authenticated)
5899*5113495bSYour Name  *  to allow the data SW to determine whether to filter out invalid data frames.
5900*5113495bSYour Name  *  (In the "connected" state, where security is enabled, but authentication
5901*5113495bSYour Name  *  has not completed, tx and rx data frames other than EAPOL or WAPI should
5902*5113495bSYour Name  *  be discarded.)
5903*5113495bSYour Name  *  This function is only relevant for systems in which the tx and rx filtering
5904*5113495bSYour Name  *  are done in the host rather than in the target.
5905*5113495bSYour Name  *
5906*5113495bSYour Name  * Return: QDF Status
5907*5113495bSYour Name  */
5908*5113495bSYour Name static QDF_STATUS ol_txrx_wrapper_peer_state_update(
5909*5113495bSYour Name 						struct cdp_soc_t *soc_hdl,
5910*5113495bSYour Name 						uint8_t *peer_mac,
5911*5113495bSYour Name 						enum ol_txrx_peer_state state)
5912*5113495bSYour Name {
5913*5113495bSYour Name 	return ol_txrx_peer_state_update(soc_hdl, peer_mac, state);
5914*5113495bSYour Name }
5915*5113495bSYour Name 
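/*
 * Illustrative sketch (compiled out): the typical state progression
 * driven through this wrapper: CONN once (re)association completes
 * with security enabled, then AUTH once the EAPOL/WAPI handshake
 * finishes, which lifts the data-frame filtering described above.
 */
#if 0
static void example_peer_authorized(struct cdp_soc_t *soc_hdl,
				    uint8_t *peer_mac)
{
	ol_txrx_wrapper_peer_state_update(soc_hdl, peer_mac,
					  OL_TXRX_PEER_STATE_AUTH);
}
#endif
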
5916*5113495bSYour Name /**
5917*5113495bSYour Name  * ol_txrx_wrapper_set_flow_control_parameters() - set flow control parameters
5918*5113495bSYour Name  * @cfg_pdev: cfg pdev handle
5919*5113495bSYour Name  * @cfg_param: cfg parameters
5920*5113495bSYour Name  *
5921*5113495bSYour Name  * Return: none
5922*5113495bSYour Name  */
5923*5113495bSYour Name static void
5924*5113495bSYour Name ol_txrx_wrapper_set_flow_control_parameters(struct cdp_cfg *cfg_pdev,
5925*5113495bSYour Name 		void *cfg_param)
5926*5113495bSYour Name {
5927*5113495bSYour Name 	return ol_tx_set_flow_control_parameters(
5928*5113495bSYour Name 		cfg_pdev,
5929*5113495bSYour Name 		(struct txrx_pdev_cfg_param_t *)cfg_param);
5930*5113495bSYour Name }
5931*5113495bSYour Name 
5932*5113495bSYour Name /**
5933*5113495bSYour Name  * ol_txrx_get_cfg() - get ini/cfg values in legacy dp
5934*5113495bSYour Name  * @soc_hdl: soc context
5935*5113495bSYour Name  * @cfg: cfg parameter type to query
5936*5113495bSYour Name  *
5937*5113495bSYour Name  * Return: the requested cfg value, or 0 if unavailable
5938*5113495bSYour Name  */
5939*5113495bSYour Name static uint32_t ol_txrx_get_cfg(struct cdp_soc_t *soc_hdl, enum cdp_dp_cfg cfg)
5940*5113495bSYour Name {
5941*5113495bSYour Name 	struct txrx_pdev_cfg_t *cfg_ctx;
5942*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
5943*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(
5944*5113495bSYour Name 							soc,
5945*5113495bSYour Name 							OL_TXRX_PDEV_ID);
5946*5113495bSYour Name 	uint32_t value = 0;
5947*5113495bSYour Name 
5948*5113495bSYour Name 	if (!pdev) {
5949*5113495bSYour Name 		qdf_print("pdev is NULL");
5950*5113495bSYour Name 		return 0;
5951*5113495bSYour Name 	}
5952*5113495bSYour Name 
5953*5113495bSYour Name 	cfg_ctx = (struct txrx_pdev_cfg_t *)(pdev->ctrl_pdev);
5954*5113495bSYour Name 	switch (cfg) {
5955*5113495bSYour Name 	case cfg_dp_enable_data_stall:
5956*5113495bSYour Name 		value = cfg_ctx->enable_data_stall_detection;
5957*5113495bSYour Name 		break;
5958*5113495bSYour Name 	case cfg_dp_enable_ip_tcp_udp_checksum_offload:
5959*5113495bSYour Name 		value = cfg_ctx->ip_tcp_udp_checksum_offload;
5960*5113495bSYour Name 		break;
5961*5113495bSYour Name 	case cfg_dp_enable_p2p_ip_tcp_udp_checksum_offload:
5962*5113495bSYour Name 		value = cfg_ctx->p2p_ip_tcp_udp_checksum_offload;
5963*5113495bSYour Name 		break;
5964*5113495bSYour Name 	case cfg_dp_enable_nan_ip_tcp_udp_checksum_offload:
5965*5113495bSYour Name 		value = cfg_ctx->nan_tcp_udp_checksumoffload;
5966*5113495bSYour Name 		break;
5967*5113495bSYour Name 	case cfg_dp_tso_enable:
5968*5113495bSYour Name 		value = cfg_ctx->tso_enable;
5969*5113495bSYour Name 		break;
5970*5113495bSYour Name 	case cfg_dp_lro_enable:
5971*5113495bSYour Name 		value = cfg_ctx->lro_enable;
5972*5113495bSYour Name 		break;
5973*5113495bSYour Name 	case cfg_dp_sg_enable:
5974*5113495bSYour Name 		value = cfg_ctx->sg_enable;
5975*5113495bSYour Name 		break;
5976*5113495bSYour Name 	case cfg_dp_gro_enable:
5977*5113495bSYour Name 		value = cfg_ctx->gro_enable;
5978*5113495bSYour Name 		break;
5979*5113495bSYour Name #ifdef QCA_LL_TX_FLOW_CONTROL_V2
5980*5113495bSYour Name 	case cfg_dp_tx_flow_start_queue_offset:
5981*5113495bSYour Name 		value = cfg_ctx->tx_flow_start_queue_offset;
5982*5113495bSYour Name 		break;
5983*5113495bSYour Name 	case cfg_dp_tx_flow_stop_queue_threshold:
5984*5113495bSYour Name 		value = cfg_ctx->tx_flow_stop_queue_th;
5985*5113495bSYour Name 		break;
5986*5113495bSYour Name #endif
5987*5113495bSYour Name 	case cfg_dp_ipa_uc_tx_buf_size:
5988*5113495bSYour Name 		value = cfg_ctx->uc_tx_buffer_size;
5989*5113495bSYour Name 		break;
5990*5113495bSYour Name 	case cfg_dp_ipa_uc_tx_partition_base:
5991*5113495bSYour Name 		value = cfg_ctx->uc_tx_partition_base;
5992*5113495bSYour Name 		break;
5993*5113495bSYour Name 	case cfg_dp_ipa_uc_rx_ind_ring_count:
5994*5113495bSYour Name 		value = cfg_ctx->uc_rx_indication_ring_count;
5995*5113495bSYour Name 		break;
5996*5113495bSYour Name 	case cfg_dp_enable_flow_steering:
5997*5113495bSYour Name 		value = cfg_ctx->enable_flow_steering;
5998*5113495bSYour Name 		break;
5999*5113495bSYour Name 	case cfg_dp_reorder_offload_supported:
6000*5113495bSYour Name 		value = cfg_ctx->is_full_reorder_offload;
6001*5113495bSYour Name 		break;
6002*5113495bSYour Name 	case cfg_dp_ce_classify_enable:
6003*5113495bSYour Name 		value = cfg_ctx->ce_classify_enabled;
6004*5113495bSYour Name 		break;
6005*5113495bSYour Name 	case cfg_dp_disable_intra_bss_fwd:
6006*5113495bSYour Name 		value = cfg_ctx->disable_intra_bss_fwd;
6007*5113495bSYour Name 		break;
6008*5113495bSYour Name 	case cfg_dp_pktlog_buffer_size:
6009*5113495bSYour Name 		value = cfg_ctx->pktlog_buffer_size;
6010*5113495bSYour Name 		break;
6011*5113495bSYour Name 	default:
6012*5113495bSYour Name 		value = 0;
6013*5113495bSYour Name 		break;
6014*5113495bSYour Name 	}
6015*5113495bSYour Name 
6016*5113495bSYour Name 	return value;
6017*5113495bSYour Name }
6018*5113495bSYour Name 
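/*
 * Illustrative sketch (compiled out): querying ini/cfg values through
 * ol_txrx_get_cfg(). A zero return can mean either "disabled" or
 * "pdev not available", so callers treat it as the conservative
 * default.
 */
#if 0
static bool example_is_tso_enabled(struct cdp_soc_t *soc_hdl)
{
	return !!ol_txrx_get_cfg(soc_hdl, cfg_dp_tso_enable);
}
#endif
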
6019*5113495bSYour Name /*
6020*5113495bSYour Name  * ol_get_pdev_param: function to get a parameter from the pdev
6021*5113495bSYour Name  * @soc_hdl: txrx soc handle
6022*5113495bSYour Name  * @pdev_id: id of pdev handle
6023*5113495bSYour Name  * @param: parameter type to get
6024*5113495bSYour Name  * @val: pointer to hold the returned value
6025*5113495bSYour Name  *
6026*5113495bSYour Name  * Return: SUCCESS or FAILURE
6027*5113495bSYour Name  */
6028*5113495bSYour Name static QDF_STATUS ol_get_pdev_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
6029*5113495bSYour Name 				    enum cdp_pdev_param_type param,
6030*5113495bSYour Name 				    cdp_config_param_type *val)
6031*5113495bSYour Name {
6032*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6033*5113495bSYour Name 	struct ol_txrx_pdev_t *olpdev = ol_txrx_get_pdev_from_pdev_id(soc,
6034*5113495bSYour Name 								      pdev_id);
6035*5113495bSYour Name 	struct cdp_pdev *pdev = ol_txrx_pdev_t_to_cdp_pdev(olpdev);
6036*5113495bSYour Name 
6037*5113495bSYour Name 	if (!pdev)
6038*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
6039*5113495bSYour Name 
6040*5113495bSYour Name 	switch (param) {
6041*5113495bSYour Name 	case CDP_TX_PENDING:
6042*5113495bSYour Name 		val->cdp_pdev_param_tx_pending = ol_txrx_get_tx_pending(pdev);
6043*5113495bSYour Name 		break;
6044*5113495bSYour Name 	default:
6045*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
6046*5113495bSYour Name 	}
6047*5113495bSYour Name 
6048*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
6049*5113495bSYour Name }
6050*5113495bSYour Name 
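/*
 * Illustrative sketch (compiled out): reading the pending tx count via
 * the get-param interface; CDP_TX_PENDING is the only parameter type
 * currently handled above.
 */
#if 0
static uint32_t example_get_tx_pending(struct cdp_soc_t *soc_hdl)
{
	cdp_config_param_type val = {0};

	if (ol_get_pdev_param(soc_hdl, OL_TXRX_PDEV_ID, CDP_TX_PENDING,
			      &val) != QDF_STATUS_SUCCESS)
		return 0;

	return val.cdp_pdev_param_tx_pending;
}
#endif
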
6051*5113495bSYour Name /*
6052*5113495bSYour Name  * ol_set_pdev_param: function to set a parameter on the pdev
6053*5113495bSYour Name  * @soc_hdl: txrx soc handle
6054*5113495bSYour Name  * @pdev_id: id of pdev handle
6055*5113495bSYour Name  * @param: parameter type to set
6056*5113495bSYour Name  * @val: value to be set
6057*5113495bSYour Name  *
6058*5113495bSYour Name  * Return: SUCCESS or FAILURE
6059*5113495bSYour Name  */
6060*5113495bSYour Name static QDF_STATUS ol_set_pdev_param(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
6061*5113495bSYour Name 				    enum cdp_pdev_param_type param,
6062*5113495bSYour Name 				    cdp_config_param_type val)
6063*5113495bSYour Name {
6064*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6065*5113495bSYour Name 	struct ol_txrx_pdev_t *olpdev = ol_txrx_get_pdev_from_pdev_id(soc,
6066*5113495bSYour Name 								      pdev_id);
6067*5113495bSYour Name 	struct cdp_pdev *pdev = ol_txrx_pdev_t_to_cdp_pdev(olpdev);
6068*5113495bSYour Name 
6069*5113495bSYour Name 	if (!pdev)
6070*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
6071*5113495bSYour Name 
6072*5113495bSYour Name 	switch (param) {
6073*5113495bSYour Name 	case CDP_MONITOR_CHANNEL:
6074*5113495bSYour Name 	{
6075*5113495bSYour Name 		ol_htt_mon_note_chan(pdev, val.cdp_pdev_param_monitor_chan);
6076*5113495bSYour Name 		break;
6077*5113495bSYour Name 	}
6078*5113495bSYour Name 	default:
6079*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
6080*5113495bSYour Name 	}
6081*5113495bSYour Name 
6082*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
6083*5113495bSYour Name }
6084*5113495bSYour Name 
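/*
 * Illustrative sketch (compiled out): telling HTT which channel the
 * monitor vdev is on via the set-param interface; the channel number
 * used here is a placeholder.
 */
#if 0
static QDF_STATUS example_note_monitor_chan(struct cdp_soc_t *soc_hdl)
{
	cdp_config_param_type val;

	val.cdp_pdev_param_monitor_chan = 36;	/* placeholder channel */
	return ol_set_pdev_param(soc_hdl, OL_TXRX_PDEV_ID,
				 CDP_MONITOR_CHANNEL, val);
}
#endif
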
6085*5113495bSYour Name #ifdef WDI_EVENT_ENABLE
6086*5113495bSYour Name void *ol_get_pldev(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
6087*5113495bSYour Name {
6088*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6089*5113495bSYour Name 	struct ol_txrx_pdev_t *pdev = ol_txrx_get_pdev_from_pdev_id(soc,
6090*5113495bSYour Name 								    pdev_id);
6091*5113495bSYour Name 
6092*5113495bSYour Name 	if (pdev)
6093*5113495bSYour Name 		return pdev->pl_dev;
6094*5113495bSYour Name 
6095*5113495bSYour Name 	return NULL;
6096*5113495bSYour Name }
6097*5113495bSYour Name #endif
6098*5113495bSYour Name 
6099*5113495bSYour Name /**
6100*5113495bSYour Name  * ol_register_packetdump_callback() - registers
6101*5113495bSYour Name  *  tx data packet, tx mgmt. packet and rx data packet
6102*5113495bSYour Name  *  dump callback handler.
6103*5113495bSYour Name  *
6104*5113495bSYour Name  * @soc_hdl: Datapath soc handle
6105*5113495bSYour Name  * @pdev_id: id of data path pdev handle
6106*5113495bSYour Name  * @ol_tx_packetdump_cb: tx packetdump cb
6107*5113495bSYour Name  * @ol_rx_packetdump_cb: rx packetdump cb
6108*5113495bSYour Name  *
6109*5113495bSYour Name  * This function is used to register tx data pkt, tx mgmt.
6110*5113495bSYour Name  * pkt and rx data pkt dump callback
6111*5113495bSYour Name  *
6112*5113495bSYour Name  * Return: None
6113*5113495bSYour Name  *
6114*5113495bSYour Name  */
6115*5113495bSYour Name static inline
6116*5113495bSYour Name void ol_register_packetdump_callback(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
6117*5113495bSYour Name 				     ol_txrx_pktdump_cb ol_tx_packetdump_cb,
6118*5113495bSYour Name 				     ol_txrx_pktdump_cb ol_rx_packetdump_cb)
6119*5113495bSYour Name {
6120*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6121*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
6122*5113495bSYour Name 
6123*5113495bSYour Name 	if (!pdev) {
6124*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
6125*5113495bSYour Name 		return;
6126*5113495bSYour Name 	}
6127*5113495bSYour Name 
6128*5113495bSYour Name 	pdev->ol_tx_packetdump_cb = ol_tx_packetdump_cb;
6129*5113495bSYour Name 	pdev->ol_rx_packetdump_cb = ol_rx_packetdump_cb;
6130*5113495bSYour Name }
6131*5113495bSYour Name 
6132*5113495bSYour Name /**
6133*5113495bSYour Name  * ol_deregister_packetdump_callback() - deregisters
6134*5113495bSYour Name  *  tx data packet, tx mgmt. packet and rx data packet
6135*5113495bSYour Name  *  dump callback handler
6136*5113495bSYour Name  * @soc_hdl: Datapath soc handle
6137*5113495bSYour Name  * @pdev_id: id of data path pdev handle
6138*5113495bSYour Name  *
6139*5113495bSYour Name  * This function is used to deregister the tx data pkt,
6140*5113495bSYour Name  * tx mgmt pkt and rx data pkt dump callbacks
6141*5113495bSYour Name  *
6142*5113495bSYour Name  * Return: None
6143*5113495bSYour Name  *
6144*5113495bSYour Name  */
6145*5113495bSYour Name static inline
6146*5113495bSYour Name void ol_deregister_packetdump_callback(struct cdp_soc_t *soc_hdl,
6147*5113495bSYour Name 				       uint8_t pdev_id)
6148*5113495bSYour Name {
6149*5113495bSYour Name 	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
6150*5113495bSYour Name 	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
6151*5113495bSYour Name 
6152*5113495bSYour Name 	if (!pdev) {
6153*5113495bSYour Name 		ol_txrx_err("pdev is NULL");
6154*5113495bSYour Name 		return;
6155*5113495bSYour Name 	}
6156*5113495bSYour Name 
6157*5113495bSYour Name 	pdev->ol_tx_packetdump_cb = NULL;
6158*5113495bSYour Name 	pdev->ol_rx_packetdump_cb = NULL;
6159*5113495bSYour Name }
6160*5113495bSYour Name 
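/*
 * Illustrative sketch (compiled out): wiring packetdump callbacks. The
 * callbacks themselves are assumed to be defined elsewhere with the
 * ol_txrx_pktdump_cb prototype; only the register/deregister pairing
 * is shown.
 */
#if 0
static ol_txrx_pktdump_cb example_tx_dump_cb;	/* assigned elsewhere */
static ol_txrx_pktdump_cb example_rx_dump_cb;	/* assigned elsewhere */

static void example_pktdump_lifecycle(struct cdp_soc_t *soc_hdl)
{
	ol_register_packetdump_callback(soc_hdl, OL_TXRX_PDEV_ID,
					example_tx_dump_cb,
					example_rx_dump_cb);
	/* ... capture window ... */
	ol_deregister_packetdump_callback(soc_hdl, OL_TXRX_PDEV_ID);
}
#endif
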
6161*5113495bSYour Name static struct cdp_cmn_ops ol_ops_cmn = {
6162*5113495bSYour Name 	.txrx_soc_attach_target = ol_txrx_soc_attach_target,
6163*5113495bSYour Name 	.txrx_vdev_attach = ol_txrx_vdev_attach,
6164*5113495bSYour Name 	.txrx_vdev_detach = ol_txrx_vdev_detach,
6165*5113495bSYour Name 	.txrx_pdev_attach = ol_txrx_pdev_attach,
6166*5113495bSYour Name 	.txrx_pdev_attach_target = ol_txrx_pdev_attach_target,
6167*5113495bSYour Name 	.txrx_pdev_post_attach = ol_txrx_pdev_post_attach,
6168*5113495bSYour Name 	.txrx_pdev_pre_detach = ol_txrx_pdev_pre_detach,
6169*5113495bSYour Name 	.txrx_pdev_detach = ol_txrx_pdev_detach,
6170*5113495bSYour Name 	.txrx_peer_create = ol_txrx_peer_attach,
6171*5113495bSYour Name 	.txrx_peer_setup = NULL,
6172*5113495bSYour Name 	.txrx_peer_teardown = NULL,
6173*5113495bSYour Name 	.txrx_peer_delete = ol_txrx_peer_detach,
6174*5113495bSYour Name 	.txrx_peer_delete_sync = ol_txrx_peer_detach_sync,
6175*5113495bSYour Name 	.txrx_vdev_register = ol_txrx_vdev_register,
6176*5113495bSYour Name 	.txrx_soc_detach = ol_txrx_soc_detach,
6177*5113495bSYour Name 	.txrx_get_vdev_mac_addr = ol_txrx_get_vdev_mac_addr,
6178*5113495bSYour Name 	.txrx_get_ctrl_pdev_from_vdev = ol_txrx_get_ctrl_pdev_from_vdev,
6179*5113495bSYour Name 	.txrx_get_mon_vdev_from_pdev = ol_txrx_get_mon_vdev_from_pdev,
6180*5113495bSYour Name 	.txrx_mgmt_send_ext = ol_txrx_mgmt_send_ext,
6181*5113495bSYour Name 	.txrx_mgmt_tx_cb_set = ol_txrx_mgmt_tx_cb_set,
6182*5113495bSYour Name 	.txrx_data_tx_cb_set = ol_txrx_data_tx_cb_set,
6183*5113495bSYour Name 	.txrx_peer_unmap_sync_cb_set = ol_txrx_peer_unmap_sync_cb_set,
6184*5113495bSYour Name 	.flush_cache_rx_queue = ol_txrx_flush_cache_rx_queue,
6185*5113495bSYour Name 	.txrx_fw_stats_get = ol_txrx_fw_stats_get,
6186*5113495bSYour Name 	.display_stats = ol_txrx_display_stats,
6187*5113495bSYour Name 	.txrx_get_cfg = ol_txrx_get_cfg,
6188*5113495bSYour Name 	/* TODO: Add other functions */
6189*5113495bSYour Name };
6190*5113495bSYour Name 
6191*5113495bSYour Name static struct cdp_misc_ops ol_ops_misc = {
6192*5113495bSYour Name 	.set_ibss_vdev_heart_beat_timer =
6193*5113495bSYour Name 		ol_txrx_set_ibss_vdev_heart_beat_timer,
6194*5113495bSYour Name #ifdef CONFIG_HL_SUPPORT
6195*5113495bSYour Name 	.set_wmm_param = ol_txrx_set_wmm_param,
6196*5113495bSYour Name #endif /* CONFIG_HL_SUPPORT */
6197*5113495bSYour Name 	.bad_peer_txctl_set_setting = ol_txrx_bad_peer_txctl_set_setting,
6198*5113495bSYour Name 	.bad_peer_txctl_update_threshold =
6199*5113495bSYour Name 		ol_txrx_bad_peer_txctl_update_threshold,
6200*5113495bSYour Name 	.hl_tdls_flag_reset = ol_txrx_hl_tdls_flag_reset,
6201*5113495bSYour Name 	.tx_non_std = ol_tx_non_std,
6202*5113495bSYour Name 	.get_vdev_id = ol_txrx_get_vdev_id,
6203*5113495bSYour Name 	.get_tx_ack_stats = ol_txrx_get_tx_ack_stats,
6204*5113495bSYour Name 	.set_wisa_mode = ol_txrx_set_wisa_mode,
6205*5113495bSYour Name 	.txrx_data_stall_cb_register = ol_register_data_stall_detect_cb,
6206*5113495bSYour Name 	.txrx_data_stall_cb_deregister = ol_deregister_data_stall_detect_cb,
6207*5113495bSYour Name 	.txrx_post_data_stall_event = ol_txrx_post_data_stall_event,
6208*5113495bSYour Name #ifdef FEATURE_RUNTIME_PM
6209*5113495bSYour Name 	.runtime_suspend = ol_txrx_runtime_suspend,
6210*5113495bSYour Name 	.runtime_resume = ol_txrx_runtime_resume,
6211*5113495bSYour Name #endif /* FEATURE_RUNTIME_PM */
6212*5113495bSYour Name 	.get_opmode = ol_txrx_get_opmode,
6213*5113495bSYour Name 	.mark_first_wakeup_packet = ol_tx_mark_first_wakeup_packet,
6214*5113495bSYour Name 	.update_mac_id = ol_txrx_update_mac_id,
6215*5113495bSYour Name 	.flush_rx_frames = ol_txrx_wrapper_flush_rx_frames,
6216*5113495bSYour Name 	.get_intra_bss_fwd_pkts_count = ol_get_intra_bss_fwd_pkts_count,
6217*5113495bSYour Name 	.pkt_log_init = htt_pkt_log_init,
6218*5113495bSYour Name 	.pkt_log_con_service = ol_txrx_pkt_log_con_service,
6219*5113495bSYour Name 	.pkt_log_exit = ol_txrx_pkt_log_exit,
6220*5113495bSYour Name 	.register_pktdump_cb = ol_register_packetdump_callback,
6221*5113495bSYour Name 	.unregister_pktdump_cb = ol_deregister_packetdump_callback,
6222*5113495bSYour Name #ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
6223*5113495bSYour Name 	.pdev_reset_driver_del_ack = ol_tx_pdev_reset_driver_del_ack,
6224*5113495bSYour Name 	.vdev_set_driver_del_ack_enable = ol_tx_vdev_set_driver_del_ack_enable,
6225*5113495bSYour Name #endif
6226*5113495bSYour Name #ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
6227*5113495bSYour Name 	.vdev_set_bundle_require_flag = ol_tx_vdev_set_bundle_require,
6228*5113495bSYour Name 	.pdev_reset_bundle_require_flag = ol_tx_pdev_reset_bundle_require,
6229*5113495bSYour Name #endif
6230*5113495bSYour Name };
6231*5113495bSYour Name 
6232*5113495bSYour Name static struct cdp_flowctl_ops ol_ops_flowctl = {
6233*5113495bSYour Name 	.register_pause_cb = ol_txrx_register_pause_cb,
6234*5113495bSYour Name #ifdef QCA_LL_TX_FLOW_CONTROL_V2
6235*5113495bSYour Name 	.set_desc_global_pool_size = ol_tx_set_desc_global_pool_size,
6236*5113495bSYour Name 	.dump_flow_pool_info = ol_tx_dump_flow_pool_info,
6237*5113495bSYour Name 	.tx_desc_thresh_reached = ol_tx_desc_thresh_reached,
6238*5113495bSYour Name #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
6239*5113495bSYour Name };
6240*5113495bSYour Name 
6241*5113495bSYour Name #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)
6242*5113495bSYour Name static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
6243*5113495bSYour Name 	.register_tx_flow_control = ol_txrx_register_tx_flow_control,
6244*5113495bSYour Name 	.deregister_tx_flow_control_cb = ol_txrx_deregister_tx_flow_control_cb,
6245*5113495bSYour Name 	.flow_control_cb = ol_txrx_flow_control_cb,
6246*5113495bSYour Name 	.get_tx_resource = ol_txrx_get_tx_resource,
6247*5113495bSYour Name 	.ll_set_tx_pause_q_depth = ol_txrx_ll_set_tx_pause_q_depth,
6248*5113495bSYour Name 	.vdev_flush = ol_txrx_vdev_flush,
6249*5113495bSYour Name 	.vdev_pause = ol_txrx_vdev_pause,
6250*5113495bSYour Name 	.vdev_unpause = ol_txrx_vdev_unpause
6251*5113495bSYour Name }; /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
6252*5113495bSYour Name #elif defined(QCA_HL_NETDEV_FLOW_CONTROL)
6253*5113495bSYour Name static struct cdp_lflowctl_ops ol_ops_l_flowctl = {
6254*5113495bSYour Name 	.register_tx_flow_control = ol_txrx_register_hl_flow_control,
6255*5113495bSYour Name 	.vdev_flush = ol_txrx_vdev_flush,
6256*5113495bSYour Name 	.vdev_pause = ol_txrx_vdev_pause,
6257*5113495bSYour Name 	.vdev_unpause = ol_txrx_vdev_unpause,
6258*5113495bSYour Name 	.set_vdev_os_queue_status = ol_txrx_set_vdev_os_queue_status,
6259*5113495bSYour Name 	.set_vdev_tx_desc_limit = ol_txrx_set_vdev_tx_desc_limit
6260*5113495bSYour Name };
6261*5113495bSYour Name #else /* QCA_HL_NETDEV_FLOW_CONTROL */
6262*5113495bSYour Name static struct cdp_lflowctl_ops ol_ops_l_flowctl = { };
6263*5113495bSYour Name #endif

#ifdef IPA_OFFLOAD
static struct cdp_ipa_ops ol_ops_ipa = {
	.ipa_get_resource = ol_txrx_ipa_uc_get_resource,
	.ipa_set_doorbell_paddr = ol_txrx_ipa_uc_set_doorbell_paddr,
	.ipa_set_active = ol_txrx_ipa_uc_set_active,
	.ipa_op_response = ol_txrx_ipa_uc_op_response,
	.ipa_register_op_cb = ol_txrx_ipa_uc_register_op_cb,
	.ipa_get_stat = ol_txrx_ipa_uc_get_stat,
	.ipa_tx_data_frame = ol_tx_send_ipa_data_frame,
	.ipa_set_uc_tx_partition_base = ol_cfg_set_ipa_uc_tx_partition_base,
	.ipa_enable_autonomy = ol_txrx_ipa_enable_autonomy,
	.ipa_disable_autonomy = ol_txrx_ipa_disable_autonomy,
	.ipa_setup = ol_txrx_ipa_setup,
	.ipa_cleanup = ol_txrx_ipa_cleanup,
	.ipa_setup_iface = ol_txrx_ipa_setup_iface,
	.ipa_cleanup_iface = ol_txrx_ipa_cleanup_iface,
	.ipa_enable_pipes = ol_txrx_ipa_enable_pipes,
	.ipa_disable_pipes = ol_txrx_ipa_disable_pipes,
	.ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
#ifdef FEATURE_METERING
	.ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
	.ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota,
#endif
	.ipa_tx_buf_smmu_mapping = ol_txrx_ipa_tx_buf_smmu_mapping,
	.ipa_tx_buf_smmu_unmapping = ol_txrx_ipa_tx_buf_smmu_unmapping
};
#endif

#ifdef RECEIVE_OFFLOAD
static struct cdp_rx_offld_ops ol_rx_offld_ops = {
	.register_rx_offld_flush_cb = ol_register_offld_flush_cb,
	.deregister_rx_offld_flush_cb = ol_deregister_offld_flush_cb
};
#endif

static struct cdp_bus_ops ol_ops_bus = {
	.bus_suspend = ol_txrx_bus_suspend,
	.bus_resume = ol_txrx_bus_resume
};

#ifdef WLAN_FEATURE_DSRC
static struct cdp_ocb_ops ol_ops_ocb = {
	.set_ocb_chan_info = ol_txrx_set_ocb_chan_info,
	.get_ocb_chan_info = ol_txrx_get_ocb_chan_info
};
#endif

static struct cdp_throttle_ops ol_ops_throttle = {
#ifdef QCA_SUPPORT_TX_THROTTLE
	.throttle_init_period = ol_tx_throttle_init_period,
	.throttle_set_level = ol_tx_throttle_set_level
#endif /* QCA_SUPPORT_TX_THROTTLE */
};

static struct cdp_mob_stats_ops ol_ops_mob_stats = {
	.clear_stats = ol_txrx_clear_stats,
	.stats = ol_txrx_stats
};

static struct cdp_cfg_ops ol_ops_cfg = {
	.set_cfg_rx_fwd_disabled = ol_set_cfg_rx_fwd_disabled,
	.set_cfg_packet_log_enabled = ol_set_cfg_packet_log_enabled,
	.cfg_attach = ol_pdev_cfg_attach,
	.vdev_rx_set_intrabss_fwd = ol_vdev_rx_set_intrabss_fwd,
	.is_rx_fwd_disabled = ol_txrx_is_rx_fwd_disabled,
	.tx_set_is_mgmt_over_wmi_enabled = ol_tx_set_is_mgmt_over_wmi_enabled,
	.is_high_latency = ol_txrx_wrapper_cfg_is_high_latency,
	.set_flow_control_parameters =
		ol_txrx_wrapper_set_flow_control_parameters,
	.set_flow_steering = ol_set_cfg_flow_steering,
	.set_ptp_rx_opt_enabled = ol_set_cfg_ptp_rx_opt_enabled,
	.set_new_htt_msg_format = ol_txrx_set_new_htt_msg_format,
	.set_peer_unmap_conf_support = ol_txrx_set_peer_unmap_conf_support,
	.get_peer_unmap_conf_support = ol_txrx_get_peer_unmap_conf_support,
	.set_tx_compl_tsf64 = ol_txrx_set_tx_compl_tsf64,
	.get_tx_compl_tsf64 = ol_txrx_get_tx_compl_tsf64,
};

static struct cdp_peer_ops ol_ops_peer = {
	.register_peer = ol_txrx_wrapper_register_peer,
	.clear_peer = ol_txrx_clear_peer,
	.find_peer_exist = ol_txrx_find_peer_exist,
	.find_peer_exist_on_vdev = ol_txrx_find_peer_exist_on_vdev,
	.find_peer_exist_on_other_vdev = ol_txrx_find_peer_exist_on_other_vdev,
	.peer_state_update = ol_txrx_wrapper_peer_state_update,
	.get_vdevid = ol_txrx_get_vdevid,
	.get_vdev_by_peer_addr = ol_txrx_wrapper_get_vdev_by_peer_addr,
	.register_ocb_peer = ol_txrx_register_ocb_peer,
	.peer_get_peer_mac_addr = ol_txrx_peer_get_peer_mac_addr,
	.get_peer_state = ol_txrx_get_peer_state,
	.update_ibss_add_peer_num_of_vdev =
		ol_txrx_update_ibss_add_peer_num_of_vdev,
#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
	.copy_mac_addr_raw = ol_txrx_copy_mac_addr_raw,
	.add_last_real_peer = ol_txrx_add_last_real_peer,
	.is_vdev_restore_last_peer = is_vdev_restore_last_peer,
	.update_last_real_peer = ol_txrx_update_last_real_peer,
	.set_tdls_offchan_enabled = ol_txrx_set_tdls_offchan_enabled,
	.set_peer_as_tdls_peer = ol_txrx_set_peer_as_tdls_peer,
#endif /* CONFIG_HL_SUPPORT && FEATURE_WLAN_TDLS */
	.peer_detach_force_delete = ol_txrx_peer_detach_force_delete,
	.peer_flush_frags = ol_txrx_peer_flush_frags,
};

static struct cdp_tx_delay_ops ol_ops_delay = {
#ifdef QCA_COMPUTE_TX_DELAY
	.tx_delay = ol_tx_delay,
	.tx_delay_hist = ol_tx_delay_hist,
	.tx_packet_count = ol_tx_packet_count,
	.tx_set_compute_interval = ol_tx_set_compute_interval
#endif /* QCA_COMPUTE_TX_DELAY */
};

static struct cdp_pmf_ops ol_ops_pmf = {
	.get_pn_info = ol_txrx_get_pn_info
};

static struct cdp_ctrl_ops ol_ops_ctrl = {
	.txrx_get_pldev = ol_get_pldev,
	.txrx_wdi_event_sub = wdi_event_sub,
	.txrx_wdi_event_unsub = wdi_event_unsub,
	.txrx_get_pdev_param = ol_get_pdev_param,
	.txrx_set_pdev_param = ol_set_pdev_param
};

/* WIN platform specific structures; left empty for MCL */
static struct cdp_me_ops ol_ops_me = {
	/* EMPTY FOR MCL */
};

static struct cdp_mon_ops ol_ops_mon = {
	/* EMPTY FOR MCL */
};

static struct cdp_host_stats_ops ol_ops_host_stats = {
	/* EMPTY FOR MCL */
};

static struct cdp_wds_ops ol_ops_wds = {
	/* EMPTY FOR MCL */
};

static struct cdp_raw_ops ol_ops_raw = {
	/* EMPTY FOR MCL */
};

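/*
 * Top-level CDP dispatch table: each member points at one of the
 * per-category ops structures defined above.  Conditionally compiled
 * categories (IPA offload, receive offload, DSRC) are wired up only
 * when the corresponding feature flag is defined.
 */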
static struct cdp_ops ol_txrx_ops = {
	.cmn_drv_ops = &ol_ops_cmn,
	.ctrl_ops = &ol_ops_ctrl,
	.me_ops = &ol_ops_me,
	.mon_ops = &ol_ops_mon,
	.host_stats_ops = &ol_ops_host_stats,
	.wds_ops = &ol_ops_wds,
	.raw_ops = &ol_ops_raw,
	.misc_ops = &ol_ops_misc,
	.cfg_ops = &ol_ops_cfg,
	.flowctl_ops = &ol_ops_flowctl,
	.l_flowctl_ops = &ol_ops_l_flowctl,
#ifdef IPA_OFFLOAD
	.ipa_ops = &ol_ops_ipa,
#endif
#ifdef RECEIVE_OFFLOAD
	.rx_offld_ops = &ol_rx_offld_ops,
#endif
	.bus_ops = &ol_ops_bus,
#ifdef WLAN_FEATURE_DSRC
	.ocb_ops = &ol_ops_ocb,
#endif
	.peer_ops = &ol_ops_peer,
	.throttle_ops = &ol_ops_throttle,
	.mob_stats_ops = &ol_ops_mob_stats,
	.delay_ops = &ol_ops_delay,
	.pmf_ops = &ol_ops_pmf,
};

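/**
 * ol_txrx_soc_attach() - allocate and initialize the txrx SOC handle
 * @scn_handle: opaque psoc handle recorded in the new SOC
 * @dp_ol_if_ops: control-path callbacks for the datapath to invoke
 *
 * Allocates an ol_txrx_soc_t and wires the ol_txrx_ops dispatch table
 * (above) into its embedded CDP SOC.
 *
 * A minimal caller sketch (hypothetical, for illustration only):
 *
 *	soc = ol_txrx_soc_attach(psoc, &dp_ol_if_ops);
 *	if (!soc)
 *		return QDF_STATUS_E_NOMEM;
 *
 * Return: opaque CDP SOC handle, or NULL on allocation failure
 */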
ol_txrx_soc_handle ol_txrx_soc_attach(void *scn_handle,
				      struct ol_if_ops *dp_ol_if_ops)
{
	struct ol_txrx_soc_t *soc;

	soc = qdf_mem_malloc(sizeof(*soc));
	if (!soc)
		return NULL;

	soc->psoc = scn_handle;
	soc->cdp_soc.ops = &ol_txrx_ops;
	soc->cdp_soc.ol_ops = dp_ol_if_ops;

	return ol_txrx_soc_t_to_cdp_soc_t(soc);
}

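/**
 * ol_txrx_get_new_htt_msg_format() - check if the new HTT message
 * format is in use
 * @pdev: physical device handle
 *
 * Return: pdev->new_htt_msg_format, or false if @pdev is NULL
 */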
bool ol_txrx_get_new_htt_msg_format(struct ol_txrx_pdev_t *pdev)
{
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return false;
	}
	return pdev->new_htt_msg_format;
}

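/**
 * ol_txrx_set_new_htt_msg_format() - record whether the target uses
 * the new HTT message format
 * @val: flag value to store in the default pdev
 *
 * Looks up the default pdev from the SOC context; logs and returns
 * early if it is not yet available.
 *
 * Return: none
 */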
void ol_txrx_set_new_htt_msg_format(uint8_t val)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	/* guard against calls before the SOC context is ready, as the
	 * sibling accessors below do
	 */
	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return;
	}
	pdev->new_htt_msg_format = val;
}

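/**
 * ol_txrx_get_peer_unmap_conf_support() - check if peer unmap
 * confirmation support is enabled
 *
 * Return: pdev->enable_peer_unmap_conf_support, or false if the SOC
 * or pdev is not available
 */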
bool ol_txrx_get_peer_unmap_conf_support(void)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return false;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return false;
	}
	return pdev->enable_peer_unmap_conf_support;
}

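/**
 * ol_txrx_set_peer_unmap_conf_support() - enable/disable peer unmap
 * confirmation support
 * @val: new setting for the default pdev
 *
 * Return: none
 */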
void ol_txrx_set_peer_unmap_conf_support(bool val)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return;
	}
	pdev->enable_peer_unmap_conf_support = val;
}

#ifdef WLAN_FEATURE_TSF_PLUS
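/**
 * ol_txrx_get_tx_compl_tsf64() - check if 64-bit TSF in tx completions
 * is enabled
 *
 * Return: pdev->enable_tx_compl_tsf64, or false if the SOC or pdev is
 * not available; always false when WLAN_FEATURE_TSF_PLUS is not set
 */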
bool ol_txrx_get_tx_compl_tsf64(void)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return false;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		qdf_print("%s: pdev is NULL", __func__);
		return false;
	}
	return pdev->enable_tx_compl_tsf64;
}

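/**
 * ol_txrx_set_tx_compl_tsf64() - enable/disable 64-bit TSF in tx
 * completions
 * @val: new setting (a no-op when WLAN_FEATURE_TSF_PLUS is not set)
 *
 * Return: none
 */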
void ol_txrx_set_tx_compl_tsf64(bool val)
{
	struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
	ol_txrx_pdev_handle pdev;

	if (qdf_unlikely(!soc))
		return;

	pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
	if (!pdev) {
		ol_txrx_err("pdev is NULL");
		return;
	}

	pdev->enable_tx_compl_tsf64 = val;
}
#else
bool ol_txrx_get_tx_compl_tsf64(void)
{
	return false;
}

void ol_txrx_set_tx_compl_tsf64(bool val)
{
}
#endif /* WLAN_FEATURE_TSF_PLUS */