/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include "dp_rx.h"
#include <hal_api.h>
#include <hal_reo.h>
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <qdf_module.h>
#ifdef QCA_PEER_EXT_STATS
#include "dp_hist.h"
#endif
#ifdef BYPASS_OL_OPS
#include <target_if_dp.h>
#endif

#ifdef REO_QDESC_HISTORY
#define REO_QDESC_HISTORY_SIZE 512
uint64_t reo_qdesc_history_idx;
struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
#endif

#ifdef REO_QDESC_HISTORY
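/**
 * dp_rx_reo_qdesc_history_add() - record a qdesc event for the REO
 * descriptor being freed in the global reo_qdesc_history ring buffer
 * @free_desc: REO descriptor freelist node the event refers to
 * @type: qdesc event type to record
 */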
static inline void
dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
			    enum reo_qdesc_event_type type)
{
	struct reo_qdesc_event *evt;
	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
	uint32_t idx;

	reo_qdesc_history_idx++;
	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));

	evt = &reo_qdesc_history[idx];

	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
	evt->ts = qdf_get_log_timestamp();
	evt->type = type;
}

#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
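/**
 * dp_rx_reo_qdesc_deferred_evt_add() - record a qdesc event for a REO
 * descriptor on the deferred freelist in the reo_qdesc_history ring buffer
 * @desc: deferred freelist node the event refers to
 * @type: qdesc event type to record
 */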
static inline void
dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
				 enum reo_qdesc_event_type type)
{
	struct reo_qdesc_event *evt;
	uint32_t idx;

	reo_qdesc_history_idx++;
	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));

	evt = &reo_qdesc_history[idx];

	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
	evt->qdesc_addr = desc->hw_qdesc_paddr;
	evt->ts = qdf_get_log_timestamp();
	evt->type = type;
}

#define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)

#define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
	qdf_mem_copy((desc)->peer_mac, (freedesc)->peer_mac, QDF_MAC_ADDR_SIZE)
#endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */

#define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
	qdf_mem_copy((freedesc)->peer_mac, (peer)->mac_addr.raw, QDF_MAC_ADDR_SIZE)

#define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)

#define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)

#else
#define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)

#define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)

#define DP_RX_REO_QDESC_FREE_EVT(free_desc)

#define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)

#define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
#endif

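/**
 * dp_set_ssn_valid_flag() - set the SSN valid (svld) bit in a REO queue
 * update command
 * @params: REO command parameters to update
 * @valid: value to program into the svld bit
 */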
static inline void
dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
		      uint8_t valid)
{
	params->u.upd_queue_params.update_svld = 1;
	params->u.upd_queue_params.svld = valid;
	dp_peer_debug("Setting SSN valid bit to %d", valid);
}

#ifdef IPA_OFFLOAD
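/**
 * dp_peer_update_tid_stats_from_reo() - REO command callback that folds
 * the HW queue statistics into the peer's per-TID rx_msdu_cnt counters
 * @soc: DP SOC handle
 * @cb_ctxt: combined peer_id/tid cookie passed when the command was sent
 * @reo_status: REO command status read back from HW
 */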
void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
				       union hal_reo_status *reo_status)
{
	struct dp_peer *peer = NULL;
	struct dp_rx_tid *rx_tid = NULL;
	unsigned long comb_peer_id_tid;
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
	uint16_t tid;
	uint16_t peer_id;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		dp_err("REO stats failure %d",
		       queue_status->header.status);
		return;
	}
	comb_peer_id_tid = (unsigned long)cb_ctxt;
	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
	if (!peer)
		return;
	rx_tid = &peer->rx_tid[tid];

	if (!rx_tid) {
		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
		return;
	}

	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
}

qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
#endif

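/**
 * dp_rx_tid_stats_cb() - REO command callback that prints the REO queue
 * statistics and the BA session counters of a TID
 * @soc: DP SOC handle
 * @cb_ctxt: dp_rx_tid context passed when the command was sent
 * @reo_status: REO command status read back from HW
 */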
void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
			union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;

	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
		return;

	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
		DP_PRINT_STATS("REO stats failure %d for TID %d",
			       queue_status->header.status, rx_tid->tid);
		return;
	}

	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
		       "ssn: %d\n"
		       "curr_idx  : %d\n"
		       "pn_31_0   : %08x\n"
		       "pn_63_32  : %08x\n"
		       "pn_95_64  : %08x\n"
		       "pn_127_96 : %08x\n"
		       "last_rx_enq_tstamp : %08x\n"
		       "last_rx_deq_tstamp : %08x\n"
		       "rx_bitmap_31_0     : %08x\n"
		       "rx_bitmap_63_32    : %08x\n"
		       "rx_bitmap_95_64    : %08x\n"
		       "rx_bitmap_127_96   : %08x\n"
		       "rx_bitmap_159_128  : %08x\n"
		       "rx_bitmap_191_160  : %08x\n"
		       "rx_bitmap_223_192  : %08x\n"
		       "rx_bitmap_255_224  : %08x\n",
		       rx_tid->tid,
		       queue_status->ssn, queue_status->curr_idx,
		       queue_status->pn_31_0, queue_status->pn_63_32,
		       queue_status->pn_95_64, queue_status->pn_127_96,
		       queue_status->last_rx_enq_tstamp,
		       queue_status->last_rx_deq_tstamp,
		       queue_status->rx_bitmap_31_0,
		       queue_status->rx_bitmap_63_32,
		       queue_status->rx_bitmap_95_64,
		       queue_status->rx_bitmap_127_96,
		       queue_status->rx_bitmap_159_128,
		       queue_status->rx_bitmap_191_160,
		       queue_status->rx_bitmap_223_192,
		       queue_status->rx_bitmap_255_224);

	DP_PRINT_STATS(
		       "curr_mpdu_cnt      : %d\n"
		       "curr_msdu_cnt      : %d\n"
		       "fwd_timeout_cnt    : %d\n"
		       "fwd_bar_cnt        : %d\n"
		       "dup_cnt            : %d\n"
		       "frms_in_order_cnt  : %d\n"
		       "bar_rcvd_cnt       : %d\n"
		       "mpdu_frms_cnt      : %d\n"
		       "msdu_frms_cnt      : %d\n"
		       "total_byte_cnt     : %d\n"
		       "late_recv_mpdu_cnt : %d\n"
		       "win_jump_2k        : %d\n"
		       "hole_cnt           : %d\n",
		       queue_status->curr_mpdu_cnt,
		       queue_status->curr_msdu_cnt,
		       queue_status->fwd_timeout_cnt,
		       queue_status->fwd_bar_cnt,
		       queue_status->dup_cnt,
		       queue_status->frms_in_order_cnt,
		       queue_status->bar_rcvd_cnt,
		       queue_status->mpdu_frms_cnt,
		       queue_status->msdu_frms_cnt,
		       queue_status->total_cnt,
		       queue_status->late_recv_mpdu_cnt,
		       queue_status->win_jump_2k,
		       queue_status->hole_cnt);

	DP_PRINT_STATS("Addba Req          : %d\n"
			"Addba Resp         : %d\n"
			"Addba Resp success : %d\n"
			"Addba Resp failed  : %d\n"
			"Delba Req received : %d\n"
			"Delba Tx success   : %d\n"
			"Delba Tx Fail      : %d\n"
			"BA window size     : %d\n"
			"Pn size            : %d\n",
			rx_tid->num_of_addba_req,
			rx_tid->num_of_addba_resp,
			rx_tid->num_addba_rsp_success,
			rx_tid->num_addba_rsp_failed,
			rx_tid->num_of_delba_req,
			rx_tid->delba_tx_success_cnt,
			rx_tid->delba_tx_fail_cnt,
			rx_tid->ba_win_size,
			rx_tid->pn_size);
}

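/**
 * dp_rx_tid_update_cb() - REO command callback for a RX TID queue update;
 * only logs unexpected command failures
 * @soc: DP SOC handle
 * @cb_ctxt: dp_rx_tid context passed when the command was sent
 * @reo_status: REO command status read back from HW
 */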
static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
				union hal_reo_status *reo_status)
{
	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;

	if ((reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
		(reo_status->rx_queue_status.header.status !=
		HAL_REO_CMD_DRAIN)) {
		/* Should not happen normally. Just print error for now */
		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
			    soc, reo_status->rx_queue_status.header.status,
			    rx_tid->tid);
	}
}

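/**
 * dp_get_peer_vdev_roaming_in_progress() - check via the ol_ops callback
 * whether the peer's vdev is currently roaming
 * @peer: DP peer handle
 *
 * Return: true if roaming is in progress, else false
 */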
bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
{
	struct ol_if_ops *ol_ops = NULL;
	bool is_roaming = false;
	uint8_t vdev_id = -1;
	struct cdp_soc_t *soc;

	if (!peer) {
		dp_peer_info("Peer is NULL. No roaming possible");
		return false;
	}

	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;

	if (ol_ops && ol_ops->is_roam_inprogress) {
		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
	}

	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);

	return is_roaming;
}

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
 *			     setup is necessary
 * @peer: DP peer handle
 *
 * Return: true - allow, false - disallow
 */
static inline
bool dp_rx_tid_setup_allow(struct dp_peer *peer)
{
	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
		return false;

	return true;
}

/**
 * dp_rx_tid_update_allow() - check if rx_tid update needed
 * @peer: DP peer handle
 *
 * Return: true - allow, false - disallow
 */
static inline
bool dp_rx_tid_update_allow(struct dp_peer *peer)
{
	/* not as expected for MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		QDF_BUG(0);
		return false;
	}

	return true;
}
#else
static inline
bool dp_rx_tid_setup_allow(struct dp_peer *peer)
{
	return true;
}

static inline
bool dp_rx_tid_update_allow(struct dp_peer *peer)
{
	return true;
}
#endif

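/**
 * dp_rx_tid_update_wifi3() - update an already allocated RX TID queue with
 * a new BA window size and start sequence number
 * @peer: DP peer handle
 * @tid: TID number
 * @ba_window_size: BlockAck window size
 * @start_seq: starting sequence number; values >= IEEE80211_SEQ_MAX mark
 * the SSN as invalid instead
 * @bar_update: true for a BAR-triggered update, in which case the reorder
 * queue setup towards the target is skipped
 *
 * Return: QDF_STATUS code
 */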
QDF_STATUS
dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t ba_window_size,
		       uint32_t start_seq, bool bar_update)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_soc *soc = peer->vdev->pdev->soc;
	struct hal_reo_cmd_params params;

	if (!dp_rx_tid_update_allow(peer)) {
		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_zero(&params, sizeof(params));

	params.std.need_status = 1;
	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_ba_window_size = 1;
	params.u.upd_queue_params.ba_window_size = ba_window_size;

	if (start_seq < IEEE80211_SEQ_MAX) {
		params.u.upd_queue_params.update_ssn = 1;
		params.u.upd_queue_params.ssn = start_seq;
	} else {
		dp_set_ssn_valid_flag(&params, 0);
	}

	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
			    dp_rx_tid_update_cb, rx_tid)) {
		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
	}

	rx_tid->ba_win_size = ba_window_size;

	if (dp_get_peer_vdev_roaming_in_progress(peer))
		return QDF_STATUS_E_PERM;

	if (!bar_update)
		dp_peer_rx_reorder_queue_setup(soc, peer,
					       BIT(tid), ba_window_size);

	return QDF_STATUS_SUCCESS;
}

#ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
/**
 * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
 *                                    the deferred list
 * @soc: Datapath soc handle
 * @freedesc: REO DESC reference that needs to be freed
 *
 * Return: true if enqueued, else false
 */
static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
					   struct reo_desc_list_node *freedesc)
{
	struct reo_desc_deferred_freelist_node *desc;

	if (!qdf_atomic_read(&soc->cmn_init_done))
		return false;

	desc = qdf_mem_malloc(sizeof(*desc));
	if (!desc)
		return false;

	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
	desc->hw_qdesc_vaddr_unaligned =
			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
	desc->free_ts = qdf_get_system_timestamp();
	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
	if (!soc->reo_desc_deferred_freelist_init) {
		qdf_mem_free(desc);
		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
		return false;
	}
	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
			     (qdf_list_node_t *)desc);
	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);

	return true;
}

/**
 * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
 *                            based on time threshold
 * @soc: Datapath soc handle
 *
 * Return: none
 */
static void dp_reo_desc_defer_free(struct dp_soc *soc)
{
	struct reo_desc_deferred_freelist_node *desc;
	unsigned long curr_ts = qdf_get_system_timestamp();

	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);

	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
				      (qdf_list_node_t **)&desc);

		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);

		qdf_mem_unmap_nbytes_single(soc->osdev,
					    desc->hw_qdesc_paddr,
					    QDF_DMA_BIDIRECTIONAL,
					    desc->hw_qdesc_alloc_size);
		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
		qdf_mem_free(desc);

		curr_ts = qdf_get_system_timestamp();
	}

	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
}
#else
static inline bool
dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
			       struct reo_desc_list_node *freedesc)
{
	return false;
}

static void dp_reo_desc_defer_free(struct dp_soc *soc)
{
}
#endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */

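/**
 * check_free_list_for_invalid_flush() - scan the freed-qdesc tracking list
 * and, for entries whose descriptor memory already carries one of the
 * known flush-back patterns (0x?DBEEF84), record the first time the
 * pattern was observed in ts_hw_flush_back
 * @soc: DP SOC handle
 */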
void check_free_list_for_invalid_flush(struct dp_soc *soc)
{
	uint32_t i;
	uint32_t *addr_deref_val;
	unsigned long curr_ts = qdf_get_system_timestamp();
	uint32_t max_list_size;

	max_list_size = soc->wlan_cfg_ctx->qref_control_size;

	if (max_list_size == 0)
		return;

	for (i = 0; i < soc->free_addr_list_idx; i++) {
		addr_deref_val = (uint32_t *)
			    soc->list_qdesc_addr_free[i].hw_qdesc_vaddr_unalign;

		if (*addr_deref_val == 0xDDBEEF84 ||
		    *addr_deref_val == 0xADBEEF84 ||
		    *addr_deref_val == 0xBDBEEF84 ||
		    *addr_deref_val == 0xCDBEEF84) {
			if (soc->list_qdesc_addr_free[i].ts_hw_flush_back == 0)
				soc->list_qdesc_addr_free[i].ts_hw_flush_back =
									curr_ts;
		}
	}
}

/**
 * dp_reo_desc_free() - Callback to free REO descriptor memory after
 * HW cache flush
 *
 * @soc: DP SOC handle
 * @cb_ctxt: Callback context
 * @reo_status: REO command status
 */
static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
			     union hal_reo_status *reo_status)
{
	struct reo_desc_list_node *freedesc =
		(struct reo_desc_list_node *)cb_ctxt;
	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
	unsigned long curr_ts = qdf_get_system_timestamp();

	if ((reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_SUCCESS) &&
		(reo_status->fl_cache_status.header.status !=
		HAL_REO_CMD_DRAIN)) {
		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
			    soc, reo_status->fl_cache_status.header.status,
			    freedesc->rx_tid.tid);
	}
	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
		     rx_tid->tid);

	/* REO desc is enqueued to be freed at a later point
	 * in time, just free the freedesc alone and return
	 */
	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
		goto out;

	DP_RX_REO_QDESC_FREE_EVT(freedesc);
	add_entry_free_list(soc, rx_tid);

	hal_reo_shared_qaddr_cache_clear(soc->hal_soc);
	qdf_mem_unmap_nbytes_single(soc->osdev,
				    rx_tid->hw_qdesc_paddr,
				    QDF_DMA_BIDIRECTIONAL,
				    rx_tid->hw_qdesc_alloc_size);
	check_free_list_for_invalid_flush(soc);

	*(uint32_t *)rx_tid->hw_qdesc_vaddr_unaligned = 0;
	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
out:
	qdf_mem_free(freedesc);
}

#if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
/* Hawkeye emulation requires bus address to be >= 0x50000000 */
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	if (dma_addr < 0x50000000)
		return QDF_STATUS_E_FAILURE;
	else
		return QDF_STATUS_SUCCESS;
}
#else
static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
{
	return QDF_STATUS_SUCCESS;
}
#endif

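/**
 * dp_rx_tid_setup_error_process() - unmap and free the HW queue
 * descriptors of the TIDs that failed setup
 * @tid_bitmap: bitmap of the TIDs to clean up
 * @peer: DP peer handle
 */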
static inline void
dp_rx_tid_setup_error_process(uint32_t tid_bitmap, struct dp_peer *peer)
{
	struct dp_rx_tid *rx_tid;
	int tid;
	struct dp_soc *soc = peer->vdev->pdev->soc;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		if (!(BIT(tid) & tid_bitmap))
			continue;

		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->hw_qdesc_vaddr_unaligned)
			continue;

		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
		    QDF_STATUS_SUCCESS)
			qdf_mem_unmap_nbytes_single(
				soc->osdev,
				rx_tid->hw_qdesc_paddr,
				QDF_DMA_BIDIRECTIONAL,
				rx_tid->hw_qdesc_alloc_size);
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
		rx_tid->hw_qdesc_paddr = 0;
	}
}

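/**
 * dp_single_rx_tid_setup() - allocate, initialize and map the REO queue
 * descriptor of one TID for a peer
 * @peer: DP peer handle
 * @tid: TID number
 * @ba_window_size: BlockAck window size
 * @start_seq: starting sequence number
 *
 * Return: QDF_STATUS code
 */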
static QDF_STATUS
dp_single_rx_tid_setup(struct dp_peer *peer, int tid,
		       uint32_t ba_window_size, uint32_t start_seq)
{
	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint32_t hw_qdesc_size;
	uint32_t hw_qdesc_align;
	int hal_pn_type;
	void *hw_qdesc_vaddr;
	uint32_t alloc_tries = 0, ret;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	struct dp_txrx_peer *txrx_peer;

	rx_tid->delba_tx_status = 0;
	rx_tid->ppdu_id_2k = 0;
	rx_tid->num_of_addba_req = 0;
	rx_tid->num_of_delba_req = 0;
	rx_tid->num_of_addba_resp = 0;
	rx_tid->num_addba_rsp_failed = 0;
	rx_tid->num_addba_rsp_success = 0;
	rx_tid->delba_tx_success_cnt = 0;
	rx_tid->delba_tx_fail_cnt = 0;
	rx_tid->statuscode = 0;

	/* TODO: Allocating HW queue descriptors based on max BA window size
	 * for all QOS TIDs so that same descriptor can be used later when
	 * ADDBA request is received. This should be changed to allocate HW
	 * queue descriptors based on BA window size being negotiated (0 for
	 * non BA cases), and reallocate when BA window size changes and also
	 * send WMI message to FW to change the REO queue descriptor in Rx
	 * peer entry as part of dp_rx_tid_update.
	 */
	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
					       ba_window_size, tid);

	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
	/* To avoid unnecessary extra allocation for alignment, try allocating
	 * exact size and see if we already have aligned address.
	 */
	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;

try_desc_alloc:
	rx_tid->hw_qdesc_vaddr_unaligned =
		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);

	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
			    soc, tid);
		return QDF_STATUS_E_NOMEM;
	}

	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
		hw_qdesc_align) {
		/* Address allocated above is not aligned. Allocate extra
		 * memory for alignment
		 */
		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
		rx_tid->hw_qdesc_vaddr_unaligned =
			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
					hw_qdesc_align - 1);

		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
				    soc, tid);
			return QDF_STATUS_E_NOMEM;
		}

		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
			rx_tid->hw_qdesc_vaddr_unaligned,
			hw_qdesc_align);

		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
			      soc, rx_tid->hw_qdesc_alloc_size,
			      hw_qdesc_vaddr);

	} else {
		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
	}
	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;

	txrx_peer = dp_get_txrx_peer(peer);

	/* TODO: Ensure that sec_type is set before ADDBA is received.
	 * Currently this is set based on htt indication
	 * HTT_T2H_MSG_TYPE_SEC_IND from target
	 */
	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
	case cdp_sec_type_tkip_nomic:
	case cdp_sec_type_aes_ccmp:
	case cdp_sec_type_aes_ccmp_256:
	case cdp_sec_type_aes_gcmp:
	case cdp_sec_type_aes_gcmp_256:
		hal_pn_type = HAL_PN_WPA;
		break;
	case cdp_sec_type_wapi:
		if (vdev->opmode == wlan_op_mode_ap)
			hal_pn_type = HAL_PN_WAPI_EVEN;
		else
			hal_pn_type = HAL_PN_WAPI_UNEVEN;
		break;
	default:
		hal_pn_type = HAL_PN_NONE;
		break;
	}

	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
		vdev->vdev_stats_id);

	ret = qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
					QDF_DMA_BIDIRECTIONAL,
					rx_tid->hw_qdesc_alloc_size,
					&rx_tid->hw_qdesc_paddr);

	if (!ret)
		add_entry_alloc_list(soc, rx_tid, peer, hw_qdesc_vaddr);

	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
			QDF_STATUS_SUCCESS || ret) {
		if (alloc_tries++ < 10) {
			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
			goto try_desc_alloc;
		} else {
			dp_peer_err("%pK: Rx tid %d desc alloc fail (lowmem)",
				    soc, tid);
			status = QDF_STATUS_E_NOMEM;
			goto error;
		}
	}

	return QDF_STATUS_SUCCESS;

error:
	dp_rx_tid_setup_error_process(1 << tid, peer);

	return status;
}

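/**
 * dp_rx_tid_setup_wifi3() - set up or update the RX TID queues given in
 * the bitmap and push the reorder queue setup to the target
 * @peer: DP peer handle
 * @tid_bitmap: bitmap of the TIDs to set up
 * @ba_window_size: BlockAck window size
 * @start_seq: starting sequence number
 *
 * Return: QDF_STATUS code
 */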
QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer,
				 uint32_t tid_bitmap,
				 uint32_t ba_window_size,
				 uint32_t start_seq)
{
	QDF_STATUS status;
	int tid;
	struct dp_rx_tid *rx_tid;
	struct dp_vdev *vdev = peer->vdev;
	struct dp_soc *soc = vdev->pdev->soc;
	uint8_t setup_fail_cnt = 0;

	if (!qdf_atomic_read(&peer->is_default_route_set))
		return QDF_STATUS_E_FAILURE;

	if (!dp_rx_tid_setup_allow(peer)) {
		dp_peer_info("skip rx tid setup for peer" QDF_MAC_ADDR_FMT,
			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		goto send_wmi_reo_cmd;
	}

	dp_peer_info("tid_bitmap 0x%x, ba_window_size %d, start_seq %d",
		     tid_bitmap, ba_window_size, start_seq);

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		if (!(BIT(tid) & tid_bitmap))
			continue;

		rx_tid = &peer->rx_tid[tid];
		rx_tid->ba_win_size = ba_window_size;
		if (rx_tid->hw_qdesc_vaddr_unaligned) {
			status = dp_rx_tid_update_wifi3(peer, tid,
					ba_window_size, start_seq, false);
			if (QDF_IS_STATUS_ERROR(status)) {
				/* Do not continue to update other TID(s);
				 * return even if they have not been set up.
				 */
				dp_peer_err("Update tid %d fail", tid);
				return status;
			}

			dp_peer_info("Update tid %d", tid);
			tid_bitmap &= ~BIT(tid);
			continue;
		}

		status = dp_single_rx_tid_setup(peer, tid,
						ba_window_size, start_seq);
		if (QDF_IS_STATUS_ERROR(status)) {
			dp_peer_err("Set up tid %d fail, status=%d",
				    tid, status);
			tid_bitmap &= ~BIT(tid);
			setup_fail_cnt++;
			continue;
		}
	}

	/* tid_bitmap == 0 means there are no TID(s) left for further setup */
	if (!tid_bitmap) {
		dp_peer_info("tid_bitmap=0, no tid setup, setup_fail_cnt %d",
			     setup_fail_cnt);

		/* If setup_fail_cnt == 0, all TID(s) have been
		 * successfully updated, so we return success.
		 */
		if (!setup_fail_cnt)
			return QDF_STATUS_SUCCESS;
		else
			return QDF_STATUS_E_FAILURE;
	}

send_wmi_reo_cmd:
	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
		status = QDF_STATUS_E_PERM;
		goto error;
	}

	dp_peer_info("peer %pK, tids 0x%x, multi_reo %d, s_seq %d, w_size %d",
		      peer, tid_bitmap,
		      soc->features.multi_rx_reorder_q_setup_support,
		      start_seq, ba_window_size);

	status = dp_peer_rx_reorder_queue_setup(soc, peer,
						tid_bitmap,
						ba_window_size);
	if (QDF_IS_STATUS_SUCCESS(status))
		return status;

error:
	dp_rx_tid_setup_error_process(tid_bitmap, peer);

	return status;
}

#ifdef DP_UMAC_HW_RESET_SUPPORT
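/**
 * dp_peer_rst_tids() - peer iterator that resets every REO TID queue of
 * the peer which has a HW queue descriptor allocated
 * @soc: DP SOC handle
 * @peer: DP peer handle
 * @arg: iterator argument, unused
 */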
static
void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
{
	int tid;

	for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
		void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;

		if (vaddr)
			dp_reset_rx_reo_tid_queue(soc, vaddr,
						  rx_tid->hw_qdesc_alloc_size);
	}
}

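/**
 * dp_reset_tid_q_setup() - reset the REO TID queues of all peers as part
 * of UMAC HW reset
 * @soc: DP SOC handle
 */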
void dp_reset_tid_q_setup(struct dp_soc *soc)
{
	dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
}
#endif
#ifdef REO_DESC_DEFER_FREE
/**
 * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add
 * the desc back to the freelist and defer the deletion
 *
 * @soc: DP SOC handle
 * @desc: Base descriptor to be freed
 * @reo_status: REO command status
 */
static void dp_reo_desc_clean_up(struct dp_soc *soc,
				 struct reo_desc_list_node *desc,
				 union hal_reo_status *reo_status)
{
	desc->free_ts = qdf_get_system_timestamp();
	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
	qdf_list_insert_back(&soc->reo_desc_freelist,
			     (qdf_list_node_t *)desc);
}

/**
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to
 * the cmd ring to avoid a REO hang
 *
 * @list_size: REO desc list size to be cleaned
 */
static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
{
	unsigned long curr_ts = qdf_get_system_timestamp();

	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
		dp_err_log("%lu:freedesc number %d in freelist",
			   curr_ts, *list_size);
		/* limit the batch queue size */
		*list_size = REO_DESC_FREELIST_SIZE;
	}
}
#else
/**
 * dp_reo_desc_clean_up() - If sending the cmd to REO to flush the cache
 * fails, free the base REO desc anyway
 *
 * @soc: DP SOC handle
 * @desc: Base descriptor to be freed
 * @reo_status: REO command status
 */
static void dp_reo_desc_clean_up(struct dp_soc *soc,
				 struct reo_desc_list_node *desc,
				 union hal_reo_status *reo_status)
{
	if (reo_status) {
		qdf_mem_zero(reo_status, sizeof(*reo_status));
		reo_status->fl_cache_status.header.status = 0;
		dp_reo_desc_free(soc, (void *)desc, reo_status);
	}
}

/**
 * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to
 * the cmd ring to avoid a REO hang
 *
 * @list_size: REO desc list size to be cleaned
 */
static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
{
}
#endif

/**
 * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
 * cmd and re-insert desc into free list if send fails.
 *
 * @soc: DP SOC handle
 * @desc: desc with resend update cmd flag set
 * @rx_tid: Desc RX tid associated with update cmd for resetting
 * valid field to 0 in h/w
 *
 * Return: QDF status
 */
static QDF_STATUS
dp_resend_update_reo_cmd(struct dp_soc *soc,
			 struct reo_desc_list_node *desc,
			 struct dp_rx_tid *rx_tid)
{
	struct hal_reo_cmd_params params;

	qdf_mem_zero(&params, sizeof(params));
	params.std.need_status = 1;
	params.std.addr_lo =
		rx_tid->hw_qdesc_paddr & 0xffffffff;
	params.std.addr_hi =
		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
	params.u.upd_queue_params.update_vld = 1;
	params.u.upd_queue_params.vld = 0;
	desc->resend_update_reo_cmd = false;
	/*
	 * If the cmd send fails then set resend_update_reo_cmd flag
	 * and insert the desc at the end of the free list to retry.
	 */
	if (dp_reo_send_cmd(soc,
			    CMD_UPDATE_RX_REO_QUEUE,
			    &params,
			    dp_rx_tid_delete_cb,
			    (void *)desc)
	    != QDF_STATUS_SUCCESS) {
		desc->resend_update_reo_cmd = true;
		desc->free_ts = qdf_get_system_timestamp();
		qdf_list_insert_back(&soc->reo_desc_freelist,
				     (qdf_list_node_t *)desc);
		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

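/**
 * dp_rx_tid_delete_cb() - REO command callback invoked once a RX TID queue
 * has been invalidated; queues the descriptor on the freelist and flushes
 * aged-out descriptors from the HW cache before freeing them
 * @soc: DP SOC handle
 * @cb_ctxt: reo_desc_list_node of the deleted queue
 * @reo_status: REO command status read back from HW
 */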
dp_rx_tid_delete_cb(struct dp_soc * soc,void * cb_ctxt,union hal_reo_status * reo_status)980*5113495bSYour Name void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
981*5113495bSYour Name 			 union hal_reo_status *reo_status)
982*5113495bSYour Name {
983*5113495bSYour Name 	struct reo_desc_list_node *freedesc =
984*5113495bSYour Name 		(struct reo_desc_list_node *)cb_ctxt;
985*5113495bSYour Name 	uint32_t list_size;
986*5113495bSYour Name 	struct reo_desc_list_node *desc = NULL;
987*5113495bSYour Name 	unsigned long curr_ts = qdf_get_system_timestamp();
988*5113495bSYour Name 	uint32_t desc_size, tot_desc_size;
989*5113495bSYour Name 	struct hal_reo_cmd_params params;
990*5113495bSYour Name 	bool flush_failure = false;
991*5113495bSYour Name 
992*5113495bSYour Name 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
993*5113495bSYour Name 
994*5113495bSYour Name 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
995*5113495bSYour Name 		qdf_mem_zero(reo_status, sizeof(*reo_status));
996*5113495bSYour Name 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
997*5113495bSYour Name 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
998*5113495bSYour Name 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
999*5113495bSYour Name 		return;
1000*5113495bSYour Name 	} else if (reo_status->rx_queue_status.header.status !=
1001*5113495bSYour Name 		HAL_REO_CMD_SUCCESS) {
1002*5113495bSYour Name 		/* Should not happen normally. Just print error for now */
1003*5113495bSYour Name 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
1004*5113495bSYour Name 			   reo_status->rx_queue_status.header.status,
1005*5113495bSYour Name 			   freedesc->rx_tid.tid);
1006*5113495bSYour Name 	}
1007*5113495bSYour Name 
1008*5113495bSYour Name 	dp_peer_info("%pK: rx_tid: %d status: %d",
1009*5113495bSYour Name 		     soc, freedesc->rx_tid.tid,
1010*5113495bSYour Name 		     reo_status->rx_queue_status.header.status);
1011*5113495bSYour Name 
1012*5113495bSYour Name 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1013*5113495bSYour Name 	freedesc->free_ts = curr_ts;
1014*5113495bSYour Name 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1015*5113495bSYour Name 				  (qdf_list_node_t *)freedesc, &list_size);
1016*5113495bSYour Name 
1017*5113495bSYour Name 	/* MCL path add the desc back to reo_desc_freelist when REO FLUSH
1018*5113495bSYour Name 	 * failed. it may cause the number of REO queue pending  in free
1019*5113495bSYour Name 	 * list is even larger than REO_CMD_RING max size and lead REO CMD
1020*5113495bSYour Name 	 * flood then cause REO HW in an unexpected condition. So it's
1021*5113495bSYour Name 	 * needed to limit the number REO cmds in a batch operation.
1022*5113495bSYour Name 	 */
1023*5113495bSYour Name 	dp_reo_limit_clean_batch_sz(&list_size);
1024*5113495bSYour Name 
1025*5113495bSYour Name 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1026*5113495bSYour Name 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1027*5113495bSYour Name 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1028*5113495bSYour Name 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
1029*5113495bSYour Name 		(desc->resend_update_reo_cmd && list_size))) {
1030*5113495bSYour Name 		struct dp_rx_tid *rx_tid;
1031*5113495bSYour Name 
1032*5113495bSYour Name 		qdf_list_remove_front(&soc->reo_desc_freelist,
1033*5113495bSYour Name 				      (qdf_list_node_t **)&desc);
1034*5113495bSYour Name 		list_size--;
1035*5113495bSYour Name 		rx_tid = &desc->rx_tid;
1036*5113495bSYour Name 
1037*5113495bSYour Name 		/* First process descs with resend_update_reo_cmd set */
1038*5113495bSYour Name 		if (desc->resend_update_reo_cmd) {
1039*5113495bSYour Name 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
1040*5113495bSYour Name 			    QDF_STATUS_SUCCESS)
1041*5113495bSYour Name 				break;
1042*5113495bSYour Name 			else
1043*5113495bSYour Name 				continue;
1044*5113495bSYour Name 		}
1045*5113495bSYour Name 
1046*5113495bSYour Name 		/* Flush and invalidate REO descriptor from HW cache: Base and
1047*5113495bSYour Name 		 * extension descriptors should be flushed separately
1048*5113495bSYour Name 		 */
1049*5113495bSYour Name 		if (desc->pending_ext_desc_size)
1050*5113495bSYour Name 			tot_desc_size = desc->pending_ext_desc_size;
1051*5113495bSYour Name 		else
1052*5113495bSYour Name 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
1053*5113495bSYour Name 		/* Get base descriptor size by passing non-qos TID */
1054*5113495bSYour Name 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
1055*5113495bSYour Name 						   DP_NON_QOS_TID);
1056*5113495bSYour Name 
1057*5113495bSYour Name 		/* Flush reo extension descriptors */
1058*5113495bSYour Name 		while ((tot_desc_size -= desc_size) > 0) {
1059*5113495bSYour Name 			qdf_mem_zero(&params, sizeof(params));
1060*5113495bSYour Name 			params.std.addr_lo =
1061*5113495bSYour Name 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1062*5113495bSYour Name 				tot_desc_size) & 0xffffffff;
1063*5113495bSYour Name 			params.std.addr_hi =
1064*5113495bSYour Name 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1065*5113495bSYour Name 
1066*5113495bSYour Name 			if (QDF_STATUS_SUCCESS !=
1067*5113495bSYour Name 			    dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params,
1068*5113495bSYour Name 					    NULL, NULL)) {
1069*5113495bSYour Name 				dp_info_rl("failed to send CMD_FLUSH_CACHE: "
1070*5113495bSYour Name 					   "tid %d desc %pK", rx_tid->tid,
1071*5113495bSYour Name 					   (void *)(rx_tid->hw_qdesc_paddr));
1072*5113495bSYour Name 				desc->pending_ext_desc_size = tot_desc_size +
1073*5113495bSYour Name 								      desc_size;
1074*5113495bSYour Name 				dp_reo_desc_clean_up(soc, desc, reo_status);
1075*5113495bSYour Name 				flush_failure = true;
1076*5113495bSYour Name 				break;
1077*5113495bSYour Name 			}
1078*5113495bSYour Name 		}
1079*5113495bSYour Name 
1080*5113495bSYour Name 		if (flush_failure)
1081*5113495bSYour Name 			break;
1082*5113495bSYour Name 
1083*5113495bSYour Name 		desc->pending_ext_desc_size = desc_size;
1084*5113495bSYour Name 
1085*5113495bSYour Name 		/* Flush base descriptor */
1086*5113495bSYour Name 		qdf_mem_zero(&params, sizeof(params));
1087*5113495bSYour Name 		params.std.need_status = 1;
1088*5113495bSYour Name 		params.std.addr_lo =
1089*5113495bSYour Name 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1090*5113495bSYour Name 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1091*5113495bSYour Name 		if (rx_tid->ba_win_size > 256)
1092*5113495bSYour Name 			params.u.fl_cache_params.flush_q_1k_desc = 1;
1093*5113495bSYour Name 		params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
1094*5113495bSYour Name 
1095*5113495bSYour Name 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1096*5113495bSYour Name 							  CMD_FLUSH_CACHE,
1097*5113495bSYour Name 							  &params,
1098*5113495bSYour Name 							  dp_reo_desc_free,
1099*5113495bSYour Name 							  (void *)desc)) {
1100*5113495bSYour Name 			union hal_reo_status reo_status;
1101*5113495bSYour Name 			/*
1102*5113495bSYour Name 			 * If dp_reo_send_cmd() returns failure, the related TID
1103*5113495bSYour Name 			 * queue desc should be unmapped, and the local reo_desc,
1104*5113495bSYour Name 			 * together with the TID queue desc, also needs to be
1105*5113495bSYour Name 			 * freed accordingly.
1106*5113495bSYour Name 			 *
1107*5113495bSYour Name 			 * Invoke the desc_free function directly here to do the
1108*5113495bSYour Name 			 * clean up.
1109*5113495bSYour Name 			 *
1110*5113495bSYour Name 			 * On the MCL path, add the desc back to the free desc
1111*5113495bSYour Name 			 * list and defer deletion.
1112*5113495bSYour Name 			 */
1113*5113495bSYour Name 			dp_info_rl("failed to send REO cmd to flush cache: tid %d",
				   rx_tid->tid);
1113*5113495bSYour Name 			dp_reo_desc_clean_up(soc, desc, &reo_status);
1114*5113495bSYour Name 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
1115*5113495bSYour Name 			break;
1116*5113495bSYour Name 		}
1117*5113495bSYour Name 	}
1118*5113495bSYour Name 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1119*5113495bSYour Name 
1120*5113495bSYour Name 	dp_reo_desc_defer_free(soc);
1121*5113495bSYour Name }
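
/*
 * Illustrative sketch (added for clarity, not part of the driver flow):
 * every REO command built above carries the 64-bit REO queue descriptor
 * physical address split into two 32-bit halves in the standard command
 * header, e.g.:
 *
 *	params.std.addr_lo = (uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
 *	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
 *
 * The same split is repeated for CMD_FLUSH_CACHE, CMD_UPDATE_RX_REO_QUEUE
 * and CMD_GET_QUEUE_STATS throughout this file.
 */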
1122*5113495bSYour Name 
1123*5113495bSYour Name /**
1124*5113495bSYour Name  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
1125*5113495bSYour Name  * @peer: Datapath peer handle
1126*5113495bSYour Name  * @tid: TID
1127*5113495bSYour Name  *
1128*5113495bSYour Name  * Return: 0 on success, error code on failure
1129*5113495bSYour Name  */
dp_rx_tid_delete_wifi3(struct dp_peer * peer,int tid)1130*5113495bSYour Name static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1131*5113495bSYour Name {
1132*5113495bSYour Name 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1133*5113495bSYour Name 	struct dp_soc *soc = peer->vdev->pdev->soc;
1134*5113495bSYour Name 	union hal_reo_status reo_status;
1135*5113495bSYour Name 	struct hal_reo_cmd_params params;
1136*5113495bSYour Name 	struct reo_desc_list_node *freedesc =
1137*5113495bSYour Name 		qdf_mem_malloc(sizeof(*freedesc));
1138*5113495bSYour Name 
1139*5113495bSYour Name 	if (!freedesc) {
1140*5113495bSYour Name 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
1141*5113495bSYour Name 			    soc, tid);
1142*5113495bSYour Name 		qdf_assert(0);
1143*5113495bSYour Name 		return -ENOMEM;
1144*5113495bSYour Name 	}
1145*5113495bSYour Name 
1146*5113495bSYour Name 	freedesc->rx_tid = *rx_tid;
1147*5113495bSYour Name 	freedesc->resend_update_reo_cmd = false;
1148*5113495bSYour Name 
1149*5113495bSYour Name 	qdf_mem_zero(&params, sizeof(params));
1150*5113495bSYour Name 
1151*5113495bSYour Name 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
1152*5113495bSYour Name 
1153*5113495bSYour Name 	reo_status.rx_queue_status.header.status = HAL_REO_CMD_SUCCESS;
1154*5113495bSYour Name 	dp_rx_tid_delete_cb(soc, freedesc, &reo_status);
1155*5113495bSYour Name 
1156*5113495bSYour Name 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1157*5113495bSYour Name 	rx_tid->hw_qdesc_alloc_size = 0;
1158*5113495bSYour Name 	rx_tid->hw_qdesc_paddr = 0;
1159*5113495bSYour Name 
1160*5113495bSYour Name 	return 0;
1161*5113495bSYour Name }
1162*5113495bSYour Name 
1163*5113495bSYour Name #ifdef DP_LFR
dp_peer_setup_remaining_tids(struct dp_peer * peer)1164*5113495bSYour Name static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1165*5113495bSYour Name {
1166*5113495bSYour Name 	int tid;
1167*5113495bSYour Name 	uint32_t tid_bitmap = 0;
1168*5113495bSYour Name 
1169*5113495bSYour Name 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++)
1170*5113495bSYour Name 		tid_bitmap |= BIT(tid);
1171*5113495bSYour Name 
1172*5113495bSYour Name 	dp_peer_info("Set up tid_bitmap 0x%x for peer %pK peer->local_id %d",
1173*5113495bSYour Name 		     tid_bitmap, peer, peer->local_id);
1174*5113495bSYour Name 	dp_rx_tid_setup_wifi3(peer, tid_bitmap, 1, 0);
1175*5113495bSYour Name }
1176*5113495bSYour Name 
1177*5113495bSYour Name #else
dp_peer_setup_remaining_tids(struct dp_peer * peer)1178*5113495bSYour Name static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {}
1179*5113495bSYour Name #endif
1180*5113495bSYour Name 
1181*5113495bSYour Name #ifdef WLAN_FEATURE_11BE_MLO
1182*5113495bSYour Name /**
1183*5113495bSYour Name  * dp_peer_rx_tids_init() - initialize each tids in peer
1184*5113495bSYour Name  * @peer: peer pointer
1185*5113495bSYour Name  *
1186*5113495bSYour Name  * Return: None
1187*5113495bSYour Name  */
dp_peer_rx_tids_init(struct dp_peer * peer)1188*5113495bSYour Name static void dp_peer_rx_tids_init(struct dp_peer *peer)
1189*5113495bSYour Name {
1190*5113495bSYour Name 	int tid;
1191*5113495bSYour Name 	struct dp_rx_tid *rx_tid;
1192*5113495bSYour Name 	struct dp_rx_tid_defrag *rx_tid_defrag;
1193*5113495bSYour Name 
1194*5113495bSYour Name 	if (!IS_MLO_DP_LINK_PEER(peer)) {
1195*5113495bSYour Name 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1196*5113495bSYour Name 			rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
1197*5113495bSYour Name 
1198*5113495bSYour Name 			rx_tid_defrag->array = &rx_tid_defrag->base;
1199*5113495bSYour Name 			rx_tid_defrag->defrag_timeout_ms = 0;
1200*5113495bSYour Name 			rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
1201*5113495bSYour Name 			rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
1202*5113495bSYour Name 			rx_tid_defrag->base.head = NULL;
1203*5113495bSYour Name 			rx_tid_defrag->base.tail = NULL;
1204*5113495bSYour Name 			rx_tid_defrag->tid = tid;
1205*5113495bSYour Name 			rx_tid_defrag->defrag_peer = peer->txrx_peer;
1206*5113495bSYour Name 		}
1207*5113495bSYour Name 	}
1208*5113495bSYour Name 
1209*5113495bSYour Name 	/* If this is not the first assoc link peer,
1210*5113495bSYour Name 	 * do not initialize rx_tids again.
1211*5113495bSYour Name 	 */
1212*5113495bSYour Name 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
1213*5113495bSYour Name 		return;
1214*5113495bSYour Name 
1215*5113495bSYour Name 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1216*5113495bSYour Name 		rx_tid = &peer->rx_tid[tid];
1217*5113495bSYour Name 		rx_tid->tid = tid;
1218*5113495bSYour Name 		rx_tid->ba_win_size = 0;
1219*5113495bSYour Name 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1220*5113495bSYour Name 	}
1221*5113495bSYour Name }
1222*5113495bSYour Name #else
dp_peer_rx_tids_init(struct dp_peer * peer)1223*5113495bSYour Name static void dp_peer_rx_tids_init(struct dp_peer *peer)
1224*5113495bSYour Name {
1225*5113495bSYour Name 	int tid;
1226*5113495bSYour Name 	struct dp_rx_tid *rx_tid;
1227*5113495bSYour Name 	struct dp_rx_tid_defrag *rx_tid_defrag;
1228*5113495bSYour Name 
1229*5113495bSYour Name 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1230*5113495bSYour Name 		rx_tid = &peer->rx_tid[tid];
1231*5113495bSYour Name 
1232*5113495bSYour Name 		rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
1233*5113495bSYour Name 		rx_tid->tid = tid;
1234*5113495bSYour Name 		rx_tid->ba_win_size = 0;
1235*5113495bSYour Name 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1236*5113495bSYour Name 
1237*5113495bSYour Name 		rx_tid_defrag->base.head = NULL;
1238*5113495bSYour Name 		rx_tid_defrag->base.tail = NULL;
1239*5113495bSYour Name 		rx_tid_defrag->tid = tid;
1240*5113495bSYour Name 		rx_tid_defrag->array = &rx_tid_defrag->base;
1241*5113495bSYour Name 		rx_tid_defrag->defrag_timeout_ms = 0;
1242*5113495bSYour Name 		rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
1243*5113495bSYour Name 		rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
1244*5113495bSYour Name 		rx_tid_defrag->defrag_peer = peer->txrx_peer;
1245*5113495bSYour Name 	}
1246*5113495bSYour Name }
1247*5113495bSYour Name #endif
1248*5113495bSYour Name 
dp_peer_rx_tid_setup(struct dp_peer * peer)1249*5113495bSYour Name void dp_peer_rx_tid_setup(struct dp_peer *peer)
1250*5113495bSYour Name {
1251*5113495bSYour Name 	struct dp_soc *soc = peer->vdev->pdev->soc;
1252*5113495bSYour Name 	struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer);
1253*5113495bSYour Name 	struct dp_vdev *vdev = peer->vdev;
1254*5113495bSYour Name 
1255*5113495bSYour Name 	dp_peer_rx_tids_init(peer);
1256*5113495bSYour Name 
1257*5113495bSYour Name 	/* Setup default (non-qos) rx tid queue */
1258*5113495bSYour Name 	dp_rx_tid_setup_wifi3(peer, BIT(DP_NON_QOS_TID), 1, 0);
1259*5113495bSYour Name 
1260*5113495bSYour Name 	/* Set up the rx tid queue for TID 0. Other queues will be set up on
1261*5113495bSYour Name 	 * receiving the first packet, which would otherwise cause a NULL REO
1262*5113495bSYour Name 	 * queue error. For a mesh peer, if the mesh peer is not deleted on one
1263*5113495bSYour Name 	 * mesh AP, the new addition of the mesh peer on another mesh AP does
1264*5113495bSYour Name 	 * not do BA negotiation, leading to a mismatch in BA windows. To
1265*5113495bSYour Name 	 * avoid this, send the max BA window during init.
1266*5113495bSYour Name 	 */
1267*5113495bSYour Name 	if (qdf_unlikely(vdev->mesh_vdev) ||
1268*5113495bSYour Name 	    qdf_unlikely(txrx_peer->nawds_enabled))
1269*5113495bSYour Name 		dp_rx_tid_setup_wifi3(
1270*5113495bSYour Name 				peer, BIT(0),
1271*5113495bSYour Name 				hal_get_rx_max_ba_window(soc->hal_soc, 0),
1272*5113495bSYour Name 				0);
1273*5113495bSYour Name 	else
1274*5113495bSYour Name 		dp_rx_tid_setup_wifi3(peer, BIT(0), 1, 0);
1275*5113495bSYour Name 
1276*5113495bSYour Name 	/*
1277*5113495bSYour Name 	 * Setup the rest of TID's to handle LFR
1278*5113495bSYour Name 	 */
1279*5113495bSYour Name 	dp_peer_setup_remaining_tids(peer);
1280*5113495bSYour Name }
1281*5113495bSYour Name 
dp_peer_rx_cleanup(struct dp_vdev * vdev,struct dp_peer * peer)1282*5113495bSYour Name void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1283*5113495bSYour Name {
1284*5113495bSYour Name 	int tid;
1285*5113495bSYour Name 	uint32_t tid_delete_mask = 0;
1286*5113495bSYour Name 
1287*5113495bSYour Name 	if (!peer->txrx_peer)
1288*5113495bSYour Name 		return;
1289*5113495bSYour Name 
1290*5113495bSYour Name 	dp_info("Remove tids for peer: %pK", peer);
1291*5113495bSYour Name 
1292*5113495bSYour Name 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1293*5113495bSYour Name 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1294*5113495bSYour Name 		struct dp_rx_tid_defrag *defrag_rx_tid =
1295*5113495bSYour Name 				&peer->txrx_peer->rx_tid[tid];
1296*5113495bSYour Name 
1297*5113495bSYour Name 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
1298*5113495bSYour Name 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
1299*5113495bSYour Name 			/* Cleanup defrag related resource */
1300*5113495bSYour Name 			dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
1301*5113495bSYour Name 			dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
1302*5113495bSYour Name 		}
1303*5113495bSYour Name 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
1304*5113495bSYour Name 
1305*5113495bSYour Name 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1306*5113495bSYour Name 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
1307*5113495bSYour Name 			dp_rx_tid_delete_wifi3(peer, tid);
1308*5113495bSYour Name 
1309*5113495bSYour Name 			tid_delete_mask |= (1 << tid);
1310*5113495bSYour Name 		}
1311*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1312*5113495bSYour Name 	}
1313*5113495bSYour Name #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1314*5113495bSYour Name 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1315*5113495bSYour Name 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
1316*5113495bSYour Name 			peer->vdev->pdev->pdev_id,
1317*5113495bSYour Name 			peer->vdev->vdev_id, peer->mac_addr.raw,
1318*5113495bSYour Name 			tid_delete_mask);
1319*5113495bSYour Name 	}
1320*5113495bSYour Name #endif
1321*5113495bSYour Name }
1322*5113495bSYour Name 
1323*5113495bSYour Name /**
1324*5113495bSYour Name  * dp_teardown_256_ba_sessions() - Teardown sessions using a 256
1325*5113495bSYour Name  *                                window size when a request with a
1326*5113495bSYour Name  *                                64 window size is received.
1327*5113495bSYour Name  *                                This is done as a WAR since HW can
1328*5113495bSYour Name  *                                have only one setting per peer (64 or 256).
1329*5113495bSYour Name  *                                For HKv2, we use the per-tid buffersize
1330*5113495bSYour Name  *                                setting for TIDs 0 to per_tid_basize_max_tid.
1331*5113495bSYour Name  *                                For TIDs above per_tid_basize_max_tid we
1332*5113495bSYour Name  *                                use the HKv1 method.
1333*5113495bSYour Name  * @peer: Datapath peer
1334*5113495bSYour Name  *
1335*5113495bSYour Name  * Return: void
1336*5113495bSYour Name  */
dp_teardown_256_ba_sessions(struct dp_peer * peer)1337*5113495bSYour Name static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
1338*5113495bSYour Name {
1339*5113495bSYour Name 	uint8_t delba_rcode = 0;
1340*5113495bSYour Name 	int tid;
1341*5113495bSYour Name 	struct dp_rx_tid *rx_tid = NULL;
1342*5113495bSYour Name 
1343*5113495bSYour Name 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
1344*5113495bSYour Name 	for (; tid < DP_MAX_TIDS; tid++) {
1345*5113495bSYour Name 		rx_tid = &peer->rx_tid[tid];
1346*5113495bSYour Name 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1347*5113495bSYour Name 
1348*5113495bSYour Name 		if (rx_tid->ba_win_size <= 64) {
1349*5113495bSYour Name 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1350*5113495bSYour Name 			continue;
1351*5113495bSYour Name 		} else {
1352*5113495bSYour Name 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
1353*5113495bSYour Name 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1354*5113495bSYour Name 				/* send delba */
1355*5113495bSYour Name 				if (!rx_tid->delba_tx_status) {
1356*5113495bSYour Name 					rx_tid->delba_tx_retry++;
1357*5113495bSYour Name 					rx_tid->delba_tx_status = 1;
1358*5113495bSYour Name 					rx_tid->delba_rcode =
1359*5113495bSYour Name 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
1360*5113495bSYour Name 					delba_rcode = rx_tid->delba_rcode;
1361*5113495bSYour Name 
1362*5113495bSYour Name 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1363*5113495bSYour Name 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
1364*5113495bSYour Name 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1365*5113495bSYour Name 							peer->vdev->pdev->soc->ctrl_psoc,
1366*5113495bSYour Name 							peer->vdev->vdev_id,
1367*5113495bSYour Name 							peer->mac_addr.raw,
1368*5113495bSYour Name 							tid, delba_rcode,
1369*5113495bSYour Name 							CDP_DELBA_REASON_NONE);
1370*5113495bSYour Name 				} else {
1371*5113495bSYour Name 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1372*5113495bSYour Name 				}
1373*5113495bSYour Name 			} else {
1374*5113495bSYour Name 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
1375*5113495bSYour Name 			}
1376*5113495bSYour Name 		}
1377*5113495bSYour Name 	}
1378*5113495bSYour Name }
1379*5113495bSYour Name 
dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t * cdp_soc,uint8_t * peer_mac,uint16_t vdev_id,uint8_t tid,int status)1380*5113495bSYour Name int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
1381*5113495bSYour Name 				      uint8_t *peer_mac,
1382*5113495bSYour Name 				      uint16_t vdev_id,
1383*5113495bSYour Name 				      uint8_t tid, int status)
1384*5113495bSYour Name {
1385*5113495bSYour Name 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1386*5113495bSYour Name 					(struct dp_soc *)cdp_soc,
1387*5113495bSYour Name 					peer_mac, 0, vdev_id,
1388*5113495bSYour Name 					DP_MOD_ID_CDP);
1389*5113495bSYour Name 	struct dp_rx_tid *rx_tid = NULL;
1390*5113495bSYour Name 
1391*5113495bSYour Name 	if (!peer) {
1392*5113495bSYour Name 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1393*5113495bSYour Name 		goto fail;
1394*5113495bSYour Name 	}
1395*5113495bSYour Name 	rx_tid = &peer->rx_tid[tid];
1396*5113495bSYour Name 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1397*5113495bSYour Name 	if (status) {
1398*5113495bSYour Name 		rx_tid->num_addba_rsp_failed++;
1399*5113495bSYour Name 		if (rx_tid->hw_qdesc_vaddr_unaligned)
1400*5113495bSYour Name 			dp_rx_tid_update_wifi3(peer, tid, 1,
1401*5113495bSYour Name 					       IEEE80211_SEQ_MAX, false);
1402*5113495bSYour Name 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1403*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1404*5113495bSYour Name 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
1405*5113495bSYour Name 
1406*5113495bSYour Name 		goto success;
1407*5113495bSYour Name 	}
1408*5113495bSYour Name 
1409*5113495bSYour Name 	rx_tid->num_addba_rsp_success++;
1410*5113495bSYour Name 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
1411*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1412*5113495bSYour Name 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
1413*5113495bSYour Name 			    cdp_soc, tid);
1414*5113495bSYour Name 		goto fail;
1415*5113495bSYour Name 	}
1416*5113495bSYour Name 
1417*5113495bSYour Name 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
1418*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1419*5113495bSYour Name 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
1420*5113495bSYour Name 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1421*5113495bSYour Name 		goto fail;
1422*5113495bSYour Name 	}
1423*5113495bSYour Name 
1424*5113495bSYour Name 	if (dp_rx_tid_update_wifi3(peer, tid,
1425*5113495bSYour Name 				   rx_tid->ba_win_size,
1426*5113495bSYour Name 				   rx_tid->startseqnum,
1427*5113495bSYour Name 				   false)) {
1428*5113495bSYour Name 		dp_err("Failed to update REO SSN");
1429*5113495bSYour Name 	}
1430*5113495bSYour Name 
1431*5113495bSYour Name 	dp_info("tid %u window_size %u start_seq_num %u",
1432*5113495bSYour Name 		tid, rx_tid->ba_win_size,
1433*5113495bSYour Name 		rx_tid->startseqnum);
1434*5113495bSYour Name 
1435*5113495bSYour Name 	/* First Session */
1436*5113495bSYour Name 	if (peer->active_ba_session_cnt == 0) {
1437*5113495bSYour Name 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
1438*5113495bSYour Name 			peer->hw_buffer_size = 256;
1439*5113495bSYour Name 		else if (rx_tid->ba_win_size <= 1024 &&
1440*5113495bSYour Name 			 rx_tid->ba_win_size > 256)
1441*5113495bSYour Name 			peer->hw_buffer_size = 1024;
1442*5113495bSYour Name 		else
1443*5113495bSYour Name 			peer->hw_buffer_size = 64;
1444*5113495bSYour Name 	}
1445*5113495bSYour Name 
1446*5113495bSYour Name 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1447*5113495bSYour Name 
1448*5113495bSYour Name 	peer->active_ba_session_cnt++;
1449*5113495bSYour Name 
1450*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1451*5113495bSYour Name 
1452*5113495bSYour Name 	/* Kill any session having a 256 buffer size
1453*5113495bSYour Name 	 * when a 64 buffer size request is received.
1454*5113495bSYour Name 	 * Also, latch on to 64 as the new buffer size.
1455*5113495bSYour Name 	 */
1456*5113495bSYour Name 	if (peer->kill_256_sessions) {
1457*5113495bSYour Name 		dp_teardown_256_ba_sessions(peer);
1458*5113495bSYour Name 		peer->kill_256_sessions = 0;
1459*5113495bSYour Name 	}
1460*5113495bSYour Name 
1461*5113495bSYour Name success:
1462*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1463*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1464*5113495bSYour Name 
1465*5113495bSYour Name fail:
1466*5113495bSYour Name 	if (peer)
1467*5113495bSYour Name 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1468*5113495bSYour Name 
1469*5113495bSYour Name 	return QDF_STATUS_E_FAILURE;
1470*5113495bSYour Name }
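
/*
 * Worked example of the first-session bucketing above (illustrative
 * values): with no active BA sessions, a negotiated ba_win_size of 100
 * latches peer->hw_buffer_size to 256, a ba_win_size of 512 latches
 * 1024, and any ba_win_size <= 64 latches 64. Subsequent sessions are
 * then constrained to the latched size by dp_check_ba_buffersize().
 */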
1471*5113495bSYour Name 
1472*5113495bSYour Name QDF_STATUS
dp_addba_responsesetup_wifi3(struct cdp_soc_t * cdp_soc,uint8_t * peer_mac,uint16_t vdev_id,uint8_t tid,uint8_t * dialogtoken,uint16_t * statuscode,uint16_t * buffersize,uint16_t * batimeout)1473*5113495bSYour Name dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1474*5113495bSYour Name 			     uint16_t vdev_id, uint8_t tid,
1475*5113495bSYour Name 			     uint8_t *dialogtoken, uint16_t *statuscode,
1476*5113495bSYour Name 			     uint16_t *buffersize, uint16_t *batimeout)
1477*5113495bSYour Name {
1478*5113495bSYour Name 	struct dp_rx_tid *rx_tid = NULL;
1479*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1480*5113495bSYour Name 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
1481*5113495bSYour Name 						       peer_mac, 0, vdev_id,
1482*5113495bSYour Name 						       DP_MOD_ID_CDP);
1483*5113495bSYour Name 
1484*5113495bSYour Name 	if (!peer) {
1485*5113495bSYour Name 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1486*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1487*5113495bSYour Name 	}
1488*5113495bSYour Name 	rx_tid = &peer->rx_tid[tid];
1489*5113495bSYour Name 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1490*5113495bSYour Name 	rx_tid->num_of_addba_resp++;
1491*5113495bSYour Name 	/* setup ADDBA response parameters */
1492*5113495bSYour Name 	*dialogtoken = rx_tid->dialogtoken;
1493*5113495bSYour Name 	*statuscode = rx_tid->statuscode;
1494*5113495bSYour Name 	*buffersize = rx_tid->ba_win_size;
1495*5113495bSYour Name 	*batimeout  = 0;
1496*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1497*5113495bSYour Name 
1498*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1499*5113495bSYour Name 
1500*5113495bSYour Name 	return status;
1501*5113495bSYour Name }
1502*5113495bSYour Name 
1503*5113495bSYour Name /**
1504*5113495bSYour Name  * dp_check_ba_buffersize() - Check the buffer size in the request and
1505*5113495bSYour Name  *                            latch onto this size based on the size
1506*5113495bSYour Name  *                            used in the first active session.
1507*5113495bSYour Name  * @peer: Datapath peer
1508*5113495bSYour Name  * @tid: Tid
1509*5113495bSYour Name  * @buffersize: Block ack window size
1510*5113495bSYour Name  *
1511*5113495bSYour Name  * Return: void
1512*5113495bSYour Name  */
dp_check_ba_buffersize(struct dp_peer * peer,uint16_t tid,uint16_t buffersize)1513*5113495bSYour Name static void dp_check_ba_buffersize(struct dp_peer *peer,
1514*5113495bSYour Name 				   uint16_t tid,
1515*5113495bSYour Name 				   uint16_t buffersize)
1516*5113495bSYour Name {
1517*5113495bSYour Name 	struct dp_rx_tid *rx_tid = NULL;
1518*5113495bSYour Name 	struct dp_soc *soc = peer->vdev->pdev->soc;
1519*5113495bSYour Name 	uint16_t max_ba_window;
1520*5113495bSYour Name 
1521*5113495bSYour Name 	max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid);
1522*5113495bSYour Name 	dp_info("Input buffersize %d, max dp allowed %d",
1523*5113495bSYour Name 		buffersize, max_ba_window);
1524*5113495bSYour Name 	/* Adjust BA window size, restrict it to max DP allowed */
1525*5113495bSYour Name 	buffersize = QDF_MIN(buffersize, max_ba_window);
1526*5113495bSYour Name 
1527*5113495bSYour Name 	dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d",
1528*5113495bSYour Name 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1529*5113495bSYour Name 		soc->per_tid_basize_max_tid, tid, buffersize,
1530*5113495bSYour Name 		peer->hw_buffer_size);
1531*5113495bSYour Name 
1532*5113495bSYour Name 	rx_tid = &peer->rx_tid[tid];
1533*5113495bSYour Name 	if (soc->per_tid_basize_max_tid &&
1534*5113495bSYour Name 	    tid < soc->per_tid_basize_max_tid) {
1535*5113495bSYour Name 		rx_tid->ba_win_size = buffersize;
1536*5113495bSYour Name 		goto out;
1537*5113495bSYour Name 	} else {
1538*5113495bSYour Name 		if (peer->active_ba_session_cnt == 0) {
1539*5113495bSYour Name 			rx_tid->ba_win_size = buffersize;
1540*5113495bSYour Name 		} else {
1541*5113495bSYour Name 			if (peer->hw_buffer_size == 64) {
1542*5113495bSYour Name 				if (buffersize <= 64)
1543*5113495bSYour Name 					rx_tid->ba_win_size = buffersize;
1544*5113495bSYour Name 				else
1545*5113495bSYour Name 					rx_tid->ba_win_size = peer->hw_buffer_size;
1546*5113495bSYour Name 			} else if (peer->hw_buffer_size == 256) {
1547*5113495bSYour Name 				if (buffersize > 64) {
1548*5113495bSYour Name 					rx_tid->ba_win_size = buffersize;
1549*5113495bSYour Name 				} else {
1550*5113495bSYour Name 					rx_tid->ba_win_size = buffersize;
1551*5113495bSYour Name 					peer->hw_buffer_size = 64;
1552*5113495bSYour Name 					peer->kill_256_sessions = 1;
1553*5113495bSYour Name 				}
1554*5113495bSYour Name 			} else if (buffersize <= 1024) {
1555*5113495bSYour Name 				/*
1556*5113495bSYour Name 				 * The above checks apply only to HKv2.
1557*5113495bSYour Name 				 * Set the incoming buffer size for others.
1558*5113495bSYour Name 				 */
1559*5113495bSYour Name 				rx_tid->ba_win_size = buffersize;
1560*5113495bSYour Name 			} else {
1561*5113495bSYour Name 				dp_err("Invalid buffer size %d", buffersize);
1562*5113495bSYour Name 				qdf_assert_always(0);
1563*5113495bSYour Name 			}
1564*5113495bSYour Name 		}
1565*5113495bSYour Name 	}
1566*5113495bSYour Name 
1567*5113495bSYour Name out:
1568*5113495bSYour Name 	dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d",
1569*5113495bSYour Name 		rx_tid->ba_win_size,
1570*5113495bSYour Name 		peer->hw_buffer_size,
1571*5113495bSYour Name 		peer->kill_256_sessions);
1572*5113495bSYour Name }
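
/*
 * Illustrative outcomes of the latch-on logic above, for TIDs that fall
 * outside the per-tid buffersize range (i.e. the HKv1 method):
 *
 *	active sessions  hw_buffer_size  requested  resulting ba_win_size
 *	---------------  --------------  ---------  ---------------------
 *	0                (any)           256        256 (first session latches)
 *	>0               64              32         32
 *	>0               64              256        64 (clamped to latch)
 *	>0               256             128        128
 *	>0               256             48         48; hw_buffer_size drops
 *	                                            to 64 and existing 256
 *	                                            sessions are torn down
 */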
1573*5113495bSYour Name 
dp_rx_tid_update_ba_win_size(struct cdp_soc_t * cdp_soc,uint8_t * peer_mac,uint16_t vdev_id,uint8_t tid,uint16_t buffersize)1574*5113495bSYour Name QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
1575*5113495bSYour Name 					uint8_t *peer_mac, uint16_t vdev_id,
1576*5113495bSYour Name 					uint8_t tid, uint16_t buffersize)
1577*5113495bSYour Name {
1578*5113495bSYour Name 	struct dp_rx_tid *rx_tid = NULL;
1579*5113495bSYour Name 	struct dp_peer *peer;
1580*5113495bSYour Name 
1581*5113495bSYour Name 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
1582*5113495bSYour Name 					      peer_mac, 0, vdev_id,
1583*5113495bSYour Name 					      DP_MOD_ID_CDP);
1584*5113495bSYour Name 	if (!peer) {
1585*5113495bSYour Name 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1586*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1587*5113495bSYour Name 	}
1588*5113495bSYour Name 
1589*5113495bSYour Name 	rx_tid = &peer->rx_tid[tid];
1590*5113495bSYour Name 
1591*5113495bSYour Name 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1592*5113495bSYour Name 	rx_tid->ba_win_size = buffersize;
1593*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1594*5113495bSYour Name 
1595*5113495bSYour Name 	dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d",
1596*5113495bSYour Name 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize);
1597*5113495bSYour Name 
1598*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1599*5113495bSYour Name 
1600*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1601*5113495bSYour Name }
1602*5113495bSYour Name 
1603*5113495bSYour Name #define DP_RX_BA_SESSION_DISABLE  1
1604*5113495bSYour Name 
dp_addba_requestprocess_wifi3(struct cdp_soc_t * cdp_soc,uint8_t * peer_mac,uint16_t vdev_id,uint8_t dialogtoken,uint16_t tid,uint16_t batimeout,uint16_t buffersize,uint16_t startseqnum)1605*5113495bSYour Name int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
1606*5113495bSYour Name 				  uint8_t *peer_mac,
1607*5113495bSYour Name 				  uint16_t vdev_id,
1608*5113495bSYour Name 				  uint8_t dialogtoken,
1609*5113495bSYour Name 				  uint16_t tid, uint16_t batimeout,
1610*5113495bSYour Name 				  uint16_t buffersize,
1611*5113495bSYour Name 				  uint16_t startseqnum)
1612*5113495bSYour Name {
1613*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1614*5113495bSYour Name 	struct dp_rx_tid *rx_tid = NULL;
1615*5113495bSYour Name 	struct dp_peer *peer;
1616*5113495bSYour Name 
1617*5113495bSYour Name 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
1618*5113495bSYour Name 					      peer_mac,
1619*5113495bSYour Name 					      0, vdev_id,
1620*5113495bSYour Name 					      DP_MOD_ID_CDP);
1621*5113495bSYour Name 
1622*5113495bSYour Name 	if (!peer) {
1623*5113495bSYour Name 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1624*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1625*5113495bSYour Name 	}
1626*5113495bSYour Name 	rx_tid = &peer->rx_tid[tid];
1627*5113495bSYour Name 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1628*5113495bSYour Name 	rx_tid->num_of_addba_req++;
1629*5113495bSYour Name 	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE &&
1630*5113495bSYour Name 	     rx_tid->hw_qdesc_vaddr_unaligned)) {
1631*5113495bSYour Name 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1632*5113495bSYour Name 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1633*5113495bSYour Name 		peer->active_ba_session_cnt--;
1634*5113495bSYour Name 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
1635*5113495bSYour Name 			      cdp_soc, tid);
1636*5113495bSYour Name 	}
1637*5113495bSYour Name 
1638*5113495bSYour Name 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1639*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1640*5113495bSYour Name 		status = QDF_STATUS_E_FAILURE;
1641*5113495bSYour Name 		goto fail;
1642*5113495bSYour Name 	}
1643*5113495bSYour Name 
1644*5113495bSYour Name 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
1645*5113495bSYour Name 		dp_peer_info("%pK: disable BA session", cdp_soc);
1647*5113495bSYour Name 
1648*5113495bSYour Name 		buffersize = 1;
1649*5113495bSYour Name 	} else if (rx_tid->rx_ba_win_size_override) {
1650*5113495bSYour Name 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
1651*5113495bSYour Name 			     rx_tid->rx_ba_win_size_override);
1652*5113495bSYour Name 
1653*5113495bSYour Name 		buffersize = rx_tid->rx_ba_win_size_override;
1654*5113495bSYour Name 	} else {
1655*5113495bSYour Name 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
1656*5113495bSYour Name 			     buffersize);
1657*5113495bSYour Name 	}
1658*5113495bSYour Name 
1659*5113495bSYour Name 	dp_check_ba_buffersize(peer, tid, buffersize);
1660*5113495bSYour Name 
1661*5113495bSYour Name 	if (dp_rx_tid_setup_wifi3(peer, BIT(tid),
1662*5113495bSYour Name 	    rx_tid->ba_win_size, startseqnum)) {
1663*5113495bSYour Name 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1664*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1665*5113495bSYour Name 		status = QDF_STATUS_E_FAILURE;
1666*5113495bSYour Name 		goto fail;
1667*5113495bSYour Name 	}
1668*5113495bSYour Name 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
1669*5113495bSYour Name 
1670*5113495bSYour Name 	rx_tid->dialogtoken = dialogtoken;
1671*5113495bSYour Name 	rx_tid->startseqnum = startseqnum;
1672*5113495bSYour Name 
1673*5113495bSYour Name 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
1674*5113495bSYour Name 		rx_tid->statuscode = rx_tid->userstatuscode;
1675*5113495bSYour Name 	else
1676*5113495bSYour Name 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
1677*5113495bSYour Name 
1678*5113495bSYour Name 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
1679*5113495bSYour Name 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
1680*5113495bSYour Name 
1681*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1682*5113495bSYour Name 
1683*5113495bSYour Name fail:
1684*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1685*5113495bSYour Name 
1686*5113495bSYour Name 	return status;
1687*5113495bSYour Name }
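
/*
 * Sketch of the expected ADDBA handshake from the control path (caller
 * names and ordering are illustrative, not a fixed driver API): the
 * request is processed first, then the stored response parameters are
 * read back to build the ADDBA response frame:
 *
 *	uint8_t tok;
 *	uint16_t status, bufsz, timeout;
 *
 *	dp_addba_requestprocess_wifi3(soc, mac, vdev_id, dialogtoken,
 *				      tid, batimeout, buffersize, ssn);
 *	dp_addba_responsesetup_wifi3(soc, mac, vdev_id, tid, &tok,
 *				     &status, &bufsz, &timeout);
 */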
1688*5113495bSYour Name 
1689*5113495bSYour Name QDF_STATUS
dp_set_addba_response(struct cdp_soc_t * cdp_soc,uint8_t * peer_mac,uint16_t vdev_id,uint8_t tid,uint16_t statuscode)1690*5113495bSYour Name dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1691*5113495bSYour Name 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
1692*5113495bSYour Name {
1693*5113495bSYour Name 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1694*5113495bSYour Name 					(struct dp_soc *)cdp_soc,
1695*5113495bSYour Name 					peer_mac, 0, vdev_id,
1696*5113495bSYour Name 					DP_MOD_ID_CDP);
1697*5113495bSYour Name 	struct dp_rx_tid *rx_tid;
1698*5113495bSYour Name 
1699*5113495bSYour Name 	if (!peer) {
1700*5113495bSYour Name 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1701*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1702*5113495bSYour Name 	}
1703*5113495bSYour Name 
1704*5113495bSYour Name 	rx_tid = &peer->rx_tid[tid];
1705*5113495bSYour Name 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1706*5113495bSYour Name 	rx_tid->userstatuscode = statuscode;
1707*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1708*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1709*5113495bSYour Name 
1710*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1711*5113495bSYour Name }
1712*5113495bSYour Name 
dp_delba_process_wifi3(struct cdp_soc_t * cdp_soc,uint8_t * peer_mac,uint16_t vdev_id,int tid,uint16_t reasoncode)1713*5113495bSYour Name int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1714*5113495bSYour Name 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
1715*5113495bSYour Name {
1716*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1717*5113495bSYour Name 	struct dp_rx_tid *rx_tid;
1718*5113495bSYour Name 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1719*5113495bSYour Name 					(struct dp_soc *)cdp_soc,
1720*5113495bSYour Name 					peer_mac, 0, vdev_id,
1721*5113495bSYour Name 					DP_MOD_ID_CDP);
1722*5113495bSYour Name 
1723*5113495bSYour Name 	if (!peer) {
1724*5113495bSYour Name 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1725*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1726*5113495bSYour Name 	}
1727*5113495bSYour Name 	rx_tid = &peer->rx_tid[tid];
1728*5113495bSYour Name 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1729*5113495bSYour Name 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
1730*5113495bSYour Name 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1731*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1732*5113495bSYour Name 		status = QDF_STATUS_E_FAILURE;
1733*5113495bSYour Name 		goto fail;
1734*5113495bSYour Name 	}
1735*5113495bSYour Name 	/* TODO: See if we can delete the existing REO queue descriptor
1736*5113495bSYour Name 	 * and replace it with a new one without a queue extension
1737*5113495bSYour Name 	 * descriptor to save memory
1738*5113495bSYour Name 	 */
1739*5113495bSYour Name 	rx_tid->delba_rcode = reasoncode;
1740*5113495bSYour Name 	rx_tid->num_of_delba_req++;
1741*5113495bSYour Name 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1742*5113495bSYour Name 
1743*5113495bSYour Name 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
1744*5113495bSYour Name 	peer->active_ba_session_cnt--;
1745*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1746*5113495bSYour Name fail:
1747*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1748*5113495bSYour Name 
1749*5113495bSYour Name 	return status;
1750*5113495bSYour Name }
1751*5113495bSYour Name 
dp_delba_tx_completion_wifi3(struct cdp_soc_t * cdp_soc,uint8_t * peer_mac,uint16_t vdev_id,uint8_t tid,int status)1752*5113495bSYour Name int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1753*5113495bSYour Name 				 uint16_t vdev_id,
1754*5113495bSYour Name 				 uint8_t tid, int status)
1755*5113495bSYour Name {
1756*5113495bSYour Name 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
1757*5113495bSYour Name 	struct dp_rx_tid *rx_tid = NULL;
1758*5113495bSYour Name 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1759*5113495bSYour Name 					(struct dp_soc *)cdp_soc,
1760*5113495bSYour Name 					peer_mac, 0, vdev_id,
1761*5113495bSYour Name 					DP_MOD_ID_CDP);
1762*5113495bSYour Name 
1763*5113495bSYour Name 	if (!peer) {
1764*5113495bSYour Name 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1765*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1766*5113495bSYour Name 	}
1767*5113495bSYour Name 	rx_tid = &peer->rx_tid[tid];
1768*5113495bSYour Name 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1769*5113495bSYour Name 	if (status) {
1770*5113495bSYour Name 		rx_tid->delba_tx_fail_cnt++;
1771*5113495bSYour Name 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
1772*5113495bSYour Name 			rx_tid->delba_tx_retry = 0;
1773*5113495bSYour Name 			rx_tid->delba_tx_status = 0;
1774*5113495bSYour Name 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1775*5113495bSYour Name 		} else {
1776*5113495bSYour Name 			rx_tid->delba_tx_retry++;
1777*5113495bSYour Name 			rx_tid->delba_tx_status = 1;
1778*5113495bSYour Name 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1779*5113495bSYour Name 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
1780*5113495bSYour Name 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1781*5113495bSYour Name 					peer->vdev->pdev->soc->ctrl_psoc,
1782*5113495bSYour Name 					peer->vdev->vdev_id,
1783*5113495bSYour Name 					peer->mac_addr.raw, tid,
1784*5113495bSYour Name 					rx_tid->delba_rcode,
1785*5113495bSYour Name 					CDP_DELBA_REASON_NONE);
1786*5113495bSYour Name 		}
1787*5113495bSYour Name 		goto end;
1788*5113495bSYour Name 	} else {
1789*5113495bSYour Name 		rx_tid->delba_tx_success_cnt++;
1790*5113495bSYour Name 		rx_tid->delba_tx_retry = 0;
1791*5113495bSYour Name 		rx_tid->delba_tx_status = 0;
1792*5113495bSYour Name 	}
1793*5113495bSYour Name 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
1794*5113495bSYour Name 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1795*5113495bSYour Name 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1796*5113495bSYour Name 		peer->active_ba_session_cnt--;
1797*5113495bSYour Name 	}
1798*5113495bSYour Name 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1799*5113495bSYour Name 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1800*5113495bSYour Name 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1801*5113495bSYour Name 	}
1802*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1803*5113495bSYour Name 
1804*5113495bSYour Name end:
1805*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1806*5113495bSYour Name 
1807*5113495bSYour Name 	return ret;
1808*5113495bSYour Name }
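
/*
 * DELBA flow sketch (illustrative): a received DELBA frame is handled
 * via dp_delba_process_wifi3(), which tears down the BA session, while
 * a locally transmitted DELBA reports its result through
 * dp_delba_tx_completion_wifi3(); on tx failure the DELBA is resent
 * until DP_MAX_DELBA_RETRY attempts are exhausted:
 *
 *	dp_delba_process_wifi3(soc, mac, vdev_id, tid, reasoncode);
 *	...
 *	dp_delba_tx_completion_wifi3(soc, mac, vdev_id, tid, tx_status);
 */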
1809*5113495bSYour Name 
1810*5113495bSYour Name QDF_STATUS
dp_set_pn_check_wifi3(struct cdp_soc_t * soc_t,uint8_t vdev_id,uint8_t * peer_mac,enum cdp_sec_type sec_type,uint32_t * rx_pn)1811*5113495bSYour Name dp_set_pn_check_wifi3(struct cdp_soc_t *soc_t, uint8_t vdev_id,
1812*5113495bSYour Name 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
1813*5113495bSYour Name 		      uint32_t *rx_pn)
1814*5113495bSYour Name {
1815*5113495bSYour Name 	struct dp_pdev *pdev;
1816*5113495bSYour Name 	int i;
1817*5113495bSYour Name 	uint8_t pn_size;
1818*5113495bSYour Name 	struct hal_reo_cmd_params params;
1819*5113495bSYour Name 	struct dp_peer *peer = NULL;
1820*5113495bSYour Name 	struct dp_vdev *vdev = NULL;
1821*5113495bSYour Name 	struct dp_soc *soc = NULL;
1822*5113495bSYour Name 
1823*5113495bSYour Name 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc_t,
1824*5113495bSYour Name 					      peer_mac, 0, vdev_id,
1825*5113495bSYour Name 					      DP_MOD_ID_CDP);
1826*5113495bSYour Name 
1827*5113495bSYour Name 	if (!peer) {
1828*5113495bSYour Name 		dp_peer_debug("%pK: Peer is NULL!", soc_t);
1829*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1830*5113495bSYour Name 	}
1831*5113495bSYour Name 
1832*5113495bSYour Name 	vdev = peer->vdev;
1833*5113495bSYour Name 
1834*5113495bSYour Name 	if (!vdev) {
1835*5113495bSYour Name 		dp_peer_debug("%pK: VDEV is NULL!", soc_t);
1836*5113495bSYour Name 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1837*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1838*5113495bSYour Name 	}
1839*5113495bSYour Name 
1840*5113495bSYour Name 	pdev = vdev->pdev;
1841*5113495bSYour Name 	soc = pdev->soc;
1842*5113495bSYour Name 	qdf_mem_zero(&params, sizeof(params));
1843*5113495bSYour Name 
1844*5113495bSYour Name 	params.std.need_status = 1;
1845*5113495bSYour Name 	params.u.upd_queue_params.update_pn_valid = 1;
1846*5113495bSYour Name 	params.u.upd_queue_params.update_pn_size = 1;
1847*5113495bSYour Name 	params.u.upd_queue_params.update_pn = 1;
1848*5113495bSYour Name 	params.u.upd_queue_params.update_pn_check_needed = 1;
1849*5113495bSYour Name 	params.u.upd_queue_params.update_svld = 1;
1850*5113495bSYour Name 	params.u.upd_queue_params.svld = 0;
1851*5113495bSYour Name 
1852*5113495bSYour Name 	switch (sec_type) {
1853*5113495bSYour Name 	case cdp_sec_type_tkip_nomic:
1854*5113495bSYour Name 	case cdp_sec_type_aes_ccmp:
1855*5113495bSYour Name 	case cdp_sec_type_aes_ccmp_256:
1856*5113495bSYour Name 	case cdp_sec_type_aes_gcmp:
1857*5113495bSYour Name 	case cdp_sec_type_aes_gcmp_256:
1858*5113495bSYour Name 		params.u.upd_queue_params.pn_check_needed = 1;
1859*5113495bSYour Name 		params.u.upd_queue_params.pn_size = PN_SIZE_48;
1860*5113495bSYour Name 		pn_size = 48;
1861*5113495bSYour Name 		break;
1862*5113495bSYour Name 	case cdp_sec_type_wapi:
1863*5113495bSYour Name 		params.u.upd_queue_params.pn_check_needed = 1;
1864*5113495bSYour Name 		params.u.upd_queue_params.pn_size = PN_SIZE_128;
1865*5113495bSYour Name 		pn_size = 128;
1866*5113495bSYour Name 		if (vdev->opmode == wlan_op_mode_ap) {
1867*5113495bSYour Name 			params.u.upd_queue_params.pn_even = 1;
1868*5113495bSYour Name 			params.u.upd_queue_params.update_pn_even = 1;
1869*5113495bSYour Name 		} else {
1870*5113495bSYour Name 			params.u.upd_queue_params.pn_uneven = 1;
1871*5113495bSYour Name 			params.u.upd_queue_params.update_pn_uneven = 1;
1872*5113495bSYour Name 		}
1873*5113495bSYour Name 		break;
1874*5113495bSYour Name 	default:
1875*5113495bSYour Name 		params.u.upd_queue_params.pn_check_needed = 0;
1876*5113495bSYour Name 		pn_size = 0;
1877*5113495bSYour Name 		break;
1878*5113495bSYour Name 	}
1879*5113495bSYour Name 
1880*5113495bSYour Name 	for (i = 0; i < DP_MAX_TIDS; i++) {
1881*5113495bSYour Name 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
1882*5113495bSYour Name 
1883*5113495bSYour Name 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1884*5113495bSYour Name 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
1885*5113495bSYour Name 			params.std.addr_lo =
1886*5113495bSYour Name 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1887*5113495bSYour Name 			params.std.addr_hi =
1888*5113495bSYour Name 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1889*5113495bSYour Name 
1890*5113495bSYour Name 			if (pn_size) {
1891*5113495bSYour Name 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
1892*5113495bSYour Name 					     soc, i, rx_pn[3], rx_pn[2],
1893*5113495bSYour Name 					     rx_pn[1], rx_pn[0]);
1894*5113495bSYour Name 				params.u.upd_queue_params.update_pn_valid = 1;
1895*5113495bSYour Name 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
1896*5113495bSYour Name 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
1897*5113495bSYour Name 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
1898*5113495bSYour Name 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
1899*5113495bSYour Name 			}
1900*5113495bSYour Name 			rx_tid->pn_size = pn_size;
1901*5113495bSYour Name 			if (dp_reo_send_cmd(soc,
1902*5113495bSYour Name 					    CMD_UPDATE_RX_REO_QUEUE,
1903*5113495bSYour Name 					    &params, dp_rx_tid_update_cb,
1904*5113495bSYour Name 					    rx_tid)) {
1905*5113495bSYour Name 				dp_err_log("failed to send CMD_UPDATE_RX_REO_QUEUE: "
1906*5113495bSYour Name 					   "tid %d desc %pK", rx_tid->tid,
1907*5113495bSYour Name 					   (void *)(rx_tid->hw_qdesc_paddr));
1908*5113495bSYour Name 				DP_STATS_INC(soc,
1909*5113495bSYour Name 					     rx.err.reo_cmd_send_fail, 1);
1910*5113495bSYour Name 			}
1911*5113495bSYour Name 		} else {
1912*5113495bSYour Name 			dp_peer_info("%pK: PN check not set up for TID %d", soc, i);
1913*5113495bSYour Name 		}
1914*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1915*5113495bSYour Name 	}
1916*5113495bSYour Name 
1917*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1918*5113495bSYour Name 
1919*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1920*5113495bSYour Name }
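
/*
 * Summary of the PN configuration applied above (derived from the
 * sec_type switch):
 *
 *	cipher                                  pn_size   PN parity
 *	TKIP (no MIC), CCMP/CCMP-256,
 *	GCMP/GCMP-256                           48 bits   n/a
 *	WAPI                                    128 bits  even on AP,
 *	                                                  uneven otherwise
 *	others                                  no PN check
 */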
1921*5113495bSYour Name 
1922*5113495bSYour Name QDF_STATUS
dp_rx_delba_ind_handler(void * soc_handle,uint16_t peer_id,uint8_t tid,uint16_t win_sz)1923*5113495bSYour Name dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
1924*5113495bSYour Name 			uint8_t tid, uint16_t win_sz)
1925*5113495bSYour Name {
1926*5113495bSYour Name 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1927*5113495bSYour Name 	struct dp_peer *peer;
1928*5113495bSYour Name 	struct dp_rx_tid *rx_tid;
1929*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1930*5113495bSYour Name 
1931*5113495bSYour Name 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1932*5113495bSYour Name 
1933*5113495bSYour Name 	if (!peer) {
1934*5113495bSYour Name 		dp_peer_err("%pK: Couldn't find peer from ID %d",
1935*5113495bSYour Name 			    soc, peer_id);
1936*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1937*5113495bSYour Name 	}
1938*5113495bSYour Name 
1939*5113495bSYour Name 	qdf_assert_always(tid < DP_MAX_TIDS);
1940*5113495bSYour Name 
1941*5113495bSYour Name 	rx_tid = &peer->rx_tid[tid];
1942*5113495bSYour Name 
1943*5113495bSYour Name 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
1944*5113495bSYour Name 		if (!rx_tid->delba_tx_status) {
1945*5113495bSYour Name 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
1946*5113495bSYour Name 				     soc, peer_id, tid, win_sz);
1947*5113495bSYour Name 
1948*5113495bSYour Name 			qdf_spin_lock_bh(&rx_tid->tid_lock);
1949*5113495bSYour Name 
1950*5113495bSYour Name 			rx_tid->delba_tx_status = 1;
1951*5113495bSYour Name 
1952*5113495bSYour Name 			rx_tid->rx_ba_win_size_override =
1953*5113495bSYour Name 			    qdf_min((uint16_t)63, win_sz);
1954*5113495bSYour Name 
1955*5113495bSYour Name 			rx_tid->delba_rcode =
1956*5113495bSYour Name 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
1957*5113495bSYour Name 
1958*5113495bSYour Name 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1959*5113495bSYour Name 
1960*5113495bSYour Name 			if (soc->cdp_soc.ol_ops->send_delba)
1961*5113495bSYour Name 				soc->cdp_soc.ol_ops->send_delba(
1962*5113495bSYour Name 					peer->vdev->pdev->soc->ctrl_psoc,
1963*5113495bSYour Name 					peer->vdev->vdev_id,
1964*5113495bSYour Name 					peer->mac_addr.raw,
1965*5113495bSYour Name 					tid,
1966*5113495bSYour Name 					rx_tid->delba_rcode,
1967*5113495bSYour Name 					CDP_DELBA_REASON_NONE);
1968*5113495bSYour Name 		}
1969*5113495bSYour Name 	} else {
1970*5113495bSYour Name 		dp_peer_err("%pK: BA session is not setup for TID:%d ",
1971*5113495bSYour Name 			    soc, tid);
1972*5113495bSYour Name 		status = QDF_STATUS_E_FAILURE;
1973*5113495bSYour Name 	}
1974*5113495bSYour Name 
1975*5113495bSYour Name 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1976*5113495bSYour Name 
1977*5113495bSYour Name 	return status;
1978*5113495bSYour Name }
1979*5113495bSYour Name 
1980*5113495bSYour Name #ifdef IPA_OFFLOAD
dp_peer_get_rxtid_stats_ipa(struct dp_peer * peer,dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)1981*5113495bSYour Name int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
1982*5113495bSYour Name 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
1983*5113495bSYour Name {
1984*5113495bSYour Name 	struct dp_soc *soc = peer->vdev->pdev->soc;
1985*5113495bSYour Name 	struct hal_reo_cmd_params params;
1986*5113495bSYour Name 	int i;
1987*5113495bSYour Name 	int stats_cmd_sent_cnt = 0;
1988*5113495bSYour Name 	QDF_STATUS status;
1989*5113495bSYour Name 	uint16_t peer_id = peer->peer_id;
1990*5113495bSYour Name 	unsigned long comb_peer_id_tid;
1991*5113495bSYour Name 	struct dp_rx_tid *rx_tid;
1992*5113495bSYour Name 
1993*5113495bSYour Name 	if (!dp_stats_cmd_cb)
1994*5113495bSYour Name 		return stats_cmd_sent_cnt;
1995*5113495bSYour Name 
1996*5113495bSYour Name 	qdf_mem_zero(&params, sizeof(params));
1997*5113495bSYour Name 	for (i = 0; i < DP_MAX_TIDS; i++) {
1998*5113495bSYour Name 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
1999*5113495bSYour Name 			continue;
2000*5113495bSYour Name 
2001*5113495bSYour Name 		rx_tid = &peer->rx_tid[i];
2002*5113495bSYour Name 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
2003*5113495bSYour Name 			params.std.need_status = 1;
2004*5113495bSYour Name 			params.std.addr_lo =
2005*5113495bSYour Name 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2006*5113495bSYour Name 			params.std.addr_hi =
2007*5113495bSYour Name 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2008*5113495bSYour Name 			params.u.stats_params.clear = 1;
2009*5113495bSYour Name 			comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT)
2010*5113495bSYour Name 					    | peer_id);
2011*5113495bSYour Name 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2012*5113495bSYour Name 						 &params, dp_stats_cmd_cb,
2013*5113495bSYour Name 						 (void *)comb_peer_id_tid);
2014*5113495bSYour Name 			if (QDF_IS_STATUS_SUCCESS(status))
2015*5113495bSYour Name 				stats_cmd_sent_cnt++;
2016*5113495bSYour Name 
2017*5113495bSYour Name 			/* Flush the REO descriptor from the HW cache to update
2018*5113495bSYour Name 			 * stats in descriptor memory. This is to help debugging.
2019*5113495bSYour Name 			 */
2020*5113495bSYour Name 			qdf_mem_zero(&params, sizeof(params));
2021*5113495bSYour Name 			params.std.need_status = 0;
2022*5113495bSYour Name 			params.std.addr_lo =
2023*5113495bSYour Name 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2024*5113495bSYour Name 			params.std.addr_hi =
2025*5113495bSYour Name 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2026*5113495bSYour Name 			params.u.fl_cache_params.flush_no_inval = 1;
2027*5113495bSYour Name 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2028*5113495bSYour Name 					NULL);
2029*5113495bSYour Name 		}
2030*5113495bSYour Name 	}
2031*5113495bSYour Name 
2032*5113495bSYour Name 	return stats_cmd_sent_cnt;
2033*5113495bSYour Name }
2034*5113495bSYour Name 
2035*5113495bSYour Name qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
2036*5113495bSYour Name 
2037*5113495bSYour Name #endif
dp_peer_rxtid_stats(struct dp_peer * peer,dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,void * cb_ctxt)2038*5113495bSYour Name int dp_peer_rxtid_stats(struct dp_peer *peer,
2039*5113495bSYour Name 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
2040*5113495bSYour Name 			void *cb_ctxt)
2041*5113495bSYour Name {
2042*5113495bSYour Name 	struct dp_soc *soc = peer->vdev->pdev->soc;
2043*5113495bSYour Name 	struct hal_reo_cmd_params params;
2044*5113495bSYour Name 	int i;
2045*5113495bSYour Name 	int stats_cmd_sent_cnt = 0;
2046*5113495bSYour Name 	QDF_STATUS status;
2047*5113495bSYour Name 	struct dp_rx_tid *rx_tid;
2048*5113495bSYour Name 
2049*5113495bSYour Name 	if (!dp_stats_cmd_cb)
2050*5113495bSYour Name 		return stats_cmd_sent_cnt;
2051*5113495bSYour Name 
2052*5113495bSYour Name 	qdf_mem_zero(&params, sizeof(params));
2053*5113495bSYour Name 	for (i = 0; i < DP_MAX_TIDS; i++) {
2054*5113495bSYour Name 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
2055*5113495bSYour Name 			continue;
2056*5113495bSYour Name 
2057*5113495bSYour Name 		rx_tid = &peer->rx_tid[i];
2058*5113495bSYour Name 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
2059*5113495bSYour Name 			params.std.need_status = 1;
2060*5113495bSYour Name 			params.std.addr_lo =
2061*5113495bSYour Name 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2062*5113495bSYour Name 			params.std.addr_hi =
2063*5113495bSYour Name 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2064*5113495bSYour Name 
2065*5113495bSYour Name 			if (cb_ctxt) {
2066*5113495bSYour Name 				status = dp_reo_send_cmd(
2067*5113495bSYour Name 						soc, CMD_GET_QUEUE_STATS,
2068*5113495bSYour Name 						&params, dp_stats_cmd_cb,
2069*5113495bSYour Name 						cb_ctxt);
2070*5113495bSYour Name 			} else {
2071*5113495bSYour Name 				status = dp_reo_send_cmd(
2072*5113495bSYour Name 						soc, CMD_GET_QUEUE_STATS,
2073*5113495bSYour Name 						&params, dp_stats_cmd_cb,
2074*5113495bSYour Name 						rx_tid);
2075*5113495bSYour Name 			}
2076*5113495bSYour Name 
2077*5113495bSYour Name 			if (QDF_IS_STATUS_SUCCESS(status))
2078*5113495bSYour Name 				stats_cmd_sent_cnt++;
2079*5113495bSYour Name 
2080*5113495bSYour Name 			/* Flush the REO descriptor from the HW cache to update
2081*5113495bSYour Name 			 * stats in descriptor memory. This is to help debugging.
2082*5113495bSYour Name 			 */
2083*5113495bSYour Name 			qdf_mem_zero(&params, sizeof(params));
2084*5113495bSYour Name 			params.std.need_status = 0;
2085*5113495bSYour Name 			params.std.addr_lo =
2086*5113495bSYour Name 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2087*5113495bSYour Name 			params.std.addr_hi =
2088*5113495bSYour Name 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2089*5113495bSYour Name 			params.u.fl_cache_params.flush_no_inval = 1;
2090*5113495bSYour Name 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2091*5113495bSYour Name 					NULL);
2092*5113495bSYour Name 		}
2093*5113495bSYour Name 	}
2094*5113495bSYour Name 
2095*5113495bSYour Name 	return stats_cmd_sent_cnt;
2096*5113495bSYour Name }
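
/*
 * Minimal usage sketch (the callback name is illustrative): the stats
 * callback receives the queried REO queue stats through the
 * union hal_reo_status argument, with cb_ctxt defaulting to the
 * per-TID rx_tid when no context is supplied:
 *
 *	static void my_rxtid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 *				      union hal_reo_status *reo_status)
 *	{
 *		struct dp_rx_tid *rx_tid = cb_ctxt;
 *
 *		dp_info("tid %d queue stats received", rx_tid->tid);
 *	}
 *
 *	sent = dp_peer_rxtid_stats(peer, my_rxtid_stats_cb, NULL);
 */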
2097*5113495bSYour Name 
dp_peer_rx_tids_create(struct dp_peer * peer)2098*5113495bSYour Name QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
2099*5113495bSYour Name {
2100*5113495bSYour Name 	uint8_t i;
2101*5113495bSYour Name 
2102*5113495bSYour Name 	if (IS_MLO_DP_MLD_PEER(peer)) {
2103*5113495bSYour Name 		dp_peer_info("skip for mld peer");
2104*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
2105*5113495bSYour Name 	}
2106*5113495bSYour Name 
2107*5113495bSYour Name 	if (peer->rx_tid) {
2108*5113495bSYour Name 		QDF_BUG(0);
2109*5113495bSYour Name 		dp_peer_err("peer rx_tid mem already exists");
2110*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
2111*5113495bSYour Name 	}
2112*5113495bSYour Name 
2113*5113495bSYour Name 	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
2114*5113495bSYour Name 			sizeof(struct dp_rx_tid));
2115*5113495bSYour Name 
2116*5113495bSYour Name 	if (!peer->rx_tid) {
2117*5113495bSYour Name 		dp_err("failed to alloc tid for peer " QDF_MAC_ADDR_FMT,
2118*5113495bSYour Name 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2119*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;
2120*5113495bSYour Name 	}
2121*5113495bSYour Name 
2122*5113495bSYour Name 	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
2123*5113495bSYour Name 	for (i = 0; i < DP_MAX_TIDS; i++)
2124*5113495bSYour Name 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
2125*5113495bSYour Name 
2126*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2127*5113495bSYour Name }
2128*5113495bSYour Name 
dp_peer_rx_tids_destroy(struct dp_peer * peer)2129*5113495bSYour Name void dp_peer_rx_tids_destroy(struct dp_peer *peer)
2130*5113495bSYour Name {
2131*5113495bSYour Name 	uint8_t i;
2132*5113495bSYour Name 
2133*5113495bSYour Name 	if (!IS_MLO_DP_LINK_PEER(peer)) {
2134*5113495bSYour Name 		for (i = 0; i < DP_MAX_TIDS; i++)
2135*5113495bSYour Name 			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
2136*5113495bSYour Name 
2137*5113495bSYour Name 		qdf_mem_free(peer->rx_tid);
2138*5113495bSYour Name 	}
2139*5113495bSYour Name 
2140*5113495bSYour Name 	peer->rx_tid = NULL;
2141*5113495bSYour Name }
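/*
 * Illustrative sketch (an assumption, not driver code): the two helpers
 * above are expected to bracket a peer's lifetime, e.g.
 *
 *	if (dp_peer_rx_tids_create(peer) != QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	dp_peer_rx_tids_destroy(peer);
 *
 * The MLO checks are deliberately asymmetric: creation is skipped for MLD
 * peers while teardown is skipped for link peers, which suggests that
 * ownership of the rx_tid array is handed from link peer to MLD peer
 * elsewhere in the MLO setup path.
 */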
2142*5113495bSYour Name 
2143*5113495bSYour Name #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
dp_dump_rx_reo_queue_info(struct dp_soc * soc,void * cb_ctxt,union hal_reo_status * reo_status)2144*5113495bSYour Name void dp_dump_rx_reo_queue_info(
2145*5113495bSYour Name 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
2146*5113495bSYour Name {
2147*5113495bSYour Name 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2148*5113495bSYour Name 
2149*5113495bSYour Name 	if (!rx_tid)
2150*5113495bSYour Name 		return;
2151*5113495bSYour Name 
2152*5113495bSYour Name 	if (reo_status->fl_cache_status.header.status !=
2153*5113495bSYour Name 		HAL_REO_CMD_SUCCESS) {
2154*5113495bSYour Name 		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
2155*5113495bSYour Name 			  reo_status->fl_cache_status.header.status);
2156*5113495bSYour Name 		return;
2157*5113495bSYour Name 	}
2158*5113495bSYour Name 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2159*5113495bSYour Name 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
2160*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2161*5113495bSYour Name }
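/*
 * Note: the callback above completes the CMD_FLUSH_CACHE issued from
 * dp_send_cache_flush_for_rx_tid() below, with cb_ctxt carrying the
 * rx_tid registered there; fl_cache_status is the member of
 * union hal_reo_status populated for flush-cache completions.
 */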
2162*5113495bSYour Name 
dp_send_cache_flush_for_rx_tid(struct dp_soc * soc,struct dp_peer * peer)2163*5113495bSYour Name void dp_send_cache_flush_for_rx_tid(
2164*5113495bSYour Name 	struct dp_soc *soc, struct dp_peer *peer)
2165*5113495bSYour Name {
2166*5113495bSYour Name 	int i;
2167*5113495bSYour Name 	struct dp_rx_tid *rx_tid;
2168*5113495bSYour Name 	struct hal_reo_cmd_params params;
2169*5113495bSYour Name 
2170*5113495bSYour Name 	if (!peer || !peer->rx_tid) {
2171*5113495bSYour Name 		dp_err_rl("Peer or peer rx_tid is NULL");
2172*5113495bSYour Name 		return;
2173*5113495bSYour Name 	}
2174*5113495bSYour Name 
2175*5113495bSYour Name 	for (i = 0; i < DP_MAX_TIDS; i++) {
2176*5113495bSYour Name 		rx_tid = &peer->rx_tid[i];
2179*5113495bSYour Name 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2180*5113495bSYour Name 		if (rx_tid->hw_qdesc_vaddr_aligned) {
2181*5113495bSYour Name 			qdf_mem_zero(&params, sizeof(params));
2182*5113495bSYour Name 			params.std.need_status = 1;
2183*5113495bSYour Name 			params.std.addr_lo =
2184*5113495bSYour Name 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2185*5113495bSYour Name 			params.std.addr_hi =
2186*5113495bSYour Name 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2187*5113495bSYour Name 			params.u.fl_cache_params.flush_no_inval = 0;
2188*5113495bSYour Name 
2189*5113495bSYour Name 			if (rx_tid->ba_win_size > 256)
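			/* BA window sizes above 256 are assumed to use the
			 * larger 1k REO queue descriptor, so flag the flush
			 * accordingly; fwd_mpdus_in_queue asks HW to forward
			 * any MPDUs still held in the queue.
			 */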
2190*5113495bSYour Name 				params.u.fl_cache_params.flush_q_1k_desc = 1;
2191*5113495bSYour Name 			params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
2192*5113495bSYour Name 
2193*5113495bSYour Name 			if (QDF_STATUS_SUCCESS !=
2194*5113495bSYour Name 				dp_reo_send_cmd(
2195*5113495bSYour Name 					soc, CMD_FLUSH_CACHE,
2196*5113495bSYour Name 					&params, dp_dump_rx_reo_queue_info,
2197*5113495bSYour Name 					(void *)rx_tid)) {
2198*5113495bSYour Name 				dp_err_rl("cache flush send failed tid %d",
2199*5113495bSYour Name 					  rx_tid->tid);
2200*5113495bSYour Name 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2201*5113495bSYour Name 				break;
2202*5113495bSYour Name 			}
2203*5113495bSYour Name 		}
2204*5113495bSYour Name 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2205*5113495bSYour Name 	}
2206*5113495bSYour Name }
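/*
 * Design note (an inference, not stated in the source): the loop above
 * stops on the first failed send rather than retrying, presumably because
 * a failure means the REO command ring has no free entries and further
 * sends in the same pass would fail as well.
 */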
2207*5113495bSYour Name 
dp_get_rx_reo_queue_info(struct cdp_soc_t * soc_hdl,uint8_t vdev_id)2208*5113495bSYour Name void dp_get_rx_reo_queue_info(
2209*5113495bSYour Name 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2210*5113495bSYour Name {
2211*5113495bSYour Name 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
2212*5113495bSYour Name 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2213*5113495bSYour Name 						     DP_MOD_ID_GENERIC_STATS);
2214*5113495bSYour Name 	struct dp_peer *peer = NULL;
2215*5113495bSYour Name 
2216*5113495bSYour Name 	if (!vdev) {
2217*5113495bSYour Name 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
2218*5113495bSYour Name 		goto failed;
2219*5113495bSYour Name 	}
2220*5113495bSYour Name 
2221*5113495bSYour Name 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
2222*5113495bSYour Name 
2223*5113495bSYour Name 	if (!peer) {
2224*5113495bSYour Name 		dp_err_rl("Peer is NULL");
2225*5113495bSYour Name 		goto failed;
2226*5113495bSYour Name 	}
2227*5113495bSYour Name 	dp_send_cache_flush_for_rx_tid(soc, peer);
2228*5113495bSYour Name failed:
2229*5113495bSYour Name 	if (peer)
2230*5113495bSYour Name 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2231*5113495bSYour Name 	if (vdev)
2232*5113495bSYour Name 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
2233*5113495bSYour Name }
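/*
 * Illustrative sketch (assumption): with DUMP_REO_QUEUE_INFO_IN_DDR
 * enabled, a debug path holding a cdp soc handle can trigger the dump for
 * a vdev's BSS peer with a single call:
 *
 *	dp_get_rx_reo_queue_info(soc_hdl, vdev_id);
 *
 * Each active TID's queue descriptor is flushed to DDR and then printed
 * via dp_dump_rx_reo_queue_info() once the flush completes.
 */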
2234*5113495bSYour Name #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
2235*5113495bSYour Name 