xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_tid.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <qdf_types.h>
21 #include <qdf_lock.h>
22 #include <hal_hw_headers.h>
23 #include "dp_htt.h"
24 #include "dp_types.h"
25 #include "dp_internal.h"
26 #include "dp_peer.h"
27 #include "dp_rx_defrag.h"
28 #include "dp_rx.h"
29 #include <hal_api.h>
30 #include <hal_reo.h>
31 #include <cdp_txrx_handle.h>
32 #include <wlan_cfg.h>
33 #ifdef WIFI_MONITOR_SUPPORT
34 #include <dp_mon.h>
35 #endif
36 #ifdef FEATURE_WDS
37 #include "dp_txrx_wds.h"
38 #endif
39 #include <qdf_module.h>
40 #ifdef QCA_PEER_EXT_STATS
41 #include "dp_hist.h"
42 #endif
43 #ifdef BYPASS_OL_OPS
44 #include <target_if_dp.h>
45 #endif
46 
47 #ifdef REO_QDESC_HISTORY
48 #define REO_QDESC_HISTORY_SIZE 512
49 uint64_t reo_qdesc_history_idx;
50 struct reo_qdesc_event reo_qdesc_history[REO_QDESC_HISTORY_SIZE];
51 #endif
52 
53 #ifdef REO_QDESC_HISTORY
54 static inline void
55 dp_rx_reo_qdesc_history_add(struct reo_desc_list_node *free_desc,
56 			    enum reo_qdesc_event_type type)
57 {
58 	struct reo_qdesc_event *evt;
59 	struct dp_rx_tid *rx_tid = &free_desc->rx_tid;
60 	uint32_t idx;
61 
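	/* The history array size is a power of two, so masking the ever-increasing
	 * index wraps writes around the circular event buffer.
	 */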
62 	reo_qdesc_history_idx++;
63 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
64 
65 	evt = &reo_qdesc_history[idx];
66 
67 	qdf_mem_copy(evt->peer_mac, free_desc->peer_mac, QDF_MAC_ADDR_SIZE);
68 	evt->qdesc_addr = rx_tid->hw_qdesc_paddr;
69 	evt->ts = qdf_get_log_timestamp();
70 	evt->type = type;
71 }
72 
73 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
74 static inline void
75 dp_rx_reo_qdesc_deferred_evt_add(struct reo_desc_deferred_freelist_node *desc,
76 				 enum reo_qdesc_event_type type)
77 {
78 	struct reo_qdesc_event *evt;
79 	uint32_t idx;
80 
81 	reo_qdesc_history_idx++;
82 	idx = (reo_qdesc_history_idx & (REO_QDESC_HISTORY_SIZE - 1));
83 
84 	evt = &reo_qdesc_history[idx];
85 
86 	qdf_mem_copy(evt->peer_mac, desc->peer_mac, QDF_MAC_ADDR_SIZE);
87 	evt->qdesc_addr = desc->hw_qdesc_paddr;
88 	evt->ts = qdf_get_log_timestamp();
89 	evt->type = type;
90 }
91 
92 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc) \
93 	dp_rx_reo_qdesc_deferred_evt_add((desc), REO_QDESC_FREE)
94 
95 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc) \
96 	qdf_mem_copy((desc)->peer_mac, (freedesc)->peer_mac, QDF_MAC_ADDR_SIZE)
97 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
98 
99 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer) \
100 	qdf_mem_copy((freedesc)->peer_mac, (peer)->mac_addr.raw, QDF_MAC_ADDR_SIZE)
101 
102 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc) \
103 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_UPDATE_CB)
104 
105 #define DP_RX_REO_QDESC_FREE_EVT(free_desc) \
106 	dp_rx_reo_qdesc_history_add((free_desc), REO_QDESC_FREE)
107 
108 #else
109 #define DP_RX_REO_QDESC_GET_MAC(freedesc, peer)
110 
111 #define DP_RX_REO_QDESC_UPDATE_EVT(free_desc)
112 
113 #define DP_RX_REO_QDESC_FREE_EVT(free_desc)
114 
115 #define DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc)
116 
117 #define DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc)
118 #endif
119 
120 static inline void
121 dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
122 		      uint8_t valid)
123 {
124 	params->u.upd_queue_params.update_svld = 1;
125 	params->u.upd_queue_params.svld = valid;
126 	dp_peer_debug("Setting SSN valid bit to %d",
127 		      valid);
128 }
129 
130 #ifdef IPA_OFFLOAD
131 void dp_peer_update_tid_stats_from_reo(struct dp_soc *soc, void *cb_ctxt,
132 				       union hal_reo_status *reo_status)
133 {
134 	struct dp_peer *peer = NULL;
135 	struct dp_rx_tid *rx_tid = NULL;
136 	unsigned long comb_peer_id_tid;
137 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
138 	uint16_t tid;
139 	uint16_t peer_id;
140 
141 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
142 		dp_err("REO stats failure %d",
143 		       queue_status->header.status);
144 		return;
145 	}
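	/* cb_ctxt packs the peer_id and tid into a single unsigned long;
	 * unpack both to locate the rx_tid whose stats are being updated.
	 */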
146 	comb_peer_id_tid = (unsigned long)cb_ctxt;
147 	tid = DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid);
148 	peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid);
149 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_GENERIC_STATS);
150 	if (!peer)
151 		return;
152 	rx_tid  = &peer->rx_tid[tid];
153 
154 	if (!rx_tid) {
155 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
156 		return;
157 	}
158 
159 	rx_tid->rx_msdu_cnt.bytes += queue_status->total_cnt;
160 	rx_tid->rx_msdu_cnt.num += queue_status->msdu_frms_cnt;
161 	dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
162 }
163 
164 qdf_export_symbol(dp_peer_update_tid_stats_from_reo);
165 #endif
166 
167 void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
168 			union hal_reo_status *reo_status)
169 {
170 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
171 	struct hal_reo_queue_status *queue_status = &reo_status->queue_status;
172 
173 	if (queue_status->header.status == HAL_REO_CMD_DRAIN)
174 		return;
175 
176 	if (queue_status->header.status != HAL_REO_CMD_SUCCESS) {
177 		DP_PRINT_STATS("REO stats failure %d for TID %d",
178 			       queue_status->header.status, rx_tid->tid);
179 		return;
180 	}
181 
182 	DP_PRINT_STATS("REO queue stats (TID: %d):\n"
183 		       "ssn: %d\n"
184 		       "curr_idx  : %d\n"
185 		       "pn_31_0   : %08x\n"
186 		       "pn_63_32  : %08x\n"
187 		       "pn_95_64  : %08x\n"
188 		       "pn_127_96 : %08x\n"
189 		       "last_rx_enq_tstamp : %08x\n"
190 		       "last_rx_deq_tstamp : %08x\n"
191 		       "rx_bitmap_31_0     : %08x\n"
192 		       "rx_bitmap_63_32    : %08x\n"
193 		       "rx_bitmap_95_64    : %08x\n"
194 		       "rx_bitmap_127_96   : %08x\n"
195 		       "rx_bitmap_159_128  : %08x\n"
196 		       "rx_bitmap_191_160  : %08x\n"
197 		       "rx_bitmap_223_192  : %08x\n"
198 		       "rx_bitmap_255_224  : %08x\n",
199 		       rx_tid->tid,
200 		       queue_status->ssn, queue_status->curr_idx,
201 		       queue_status->pn_31_0, queue_status->pn_63_32,
202 		       queue_status->pn_95_64, queue_status->pn_127_96,
203 		       queue_status->last_rx_enq_tstamp,
204 		       queue_status->last_rx_deq_tstamp,
205 		       queue_status->rx_bitmap_31_0,
206 		       queue_status->rx_bitmap_63_32,
207 		       queue_status->rx_bitmap_95_64,
208 		       queue_status->rx_bitmap_127_96,
209 		       queue_status->rx_bitmap_159_128,
210 		       queue_status->rx_bitmap_191_160,
211 		       queue_status->rx_bitmap_223_192,
212 		       queue_status->rx_bitmap_255_224);
213 
214 	DP_PRINT_STATS(
215 		       "curr_mpdu_cnt      : %d\n"
216 		       "curr_msdu_cnt      : %d\n"
217 		       "fwd_timeout_cnt    : %d\n"
218 		       "fwd_bar_cnt        : %d\n"
219 		       "dup_cnt            : %d\n"
220 		       "frms_in_order_cnt  : %d\n"
221 		       "bar_rcvd_cnt       : %d\n"
222 		       "mpdu_frms_cnt      : %d\n"
223 		       "msdu_frms_cnt      : %d\n"
224 		       "total_byte_cnt     : %d\n"
225 		       "late_recv_mpdu_cnt : %d\n"
226 		       "win_jump_2k        : %d\n"
227 		       "hole_cnt           : %d\n",
228 		       queue_status->curr_mpdu_cnt,
229 		       queue_status->curr_msdu_cnt,
230 		       queue_status->fwd_timeout_cnt,
231 		       queue_status->fwd_bar_cnt,
232 		       queue_status->dup_cnt,
233 		       queue_status->frms_in_order_cnt,
234 		       queue_status->bar_rcvd_cnt,
235 		       queue_status->mpdu_frms_cnt,
236 		       queue_status->msdu_frms_cnt,
237 		       queue_status->total_cnt,
238 		       queue_status->late_recv_mpdu_cnt,
239 		       queue_status->win_jump_2k,
240 		       queue_status->hole_cnt);
241 
242 	DP_PRINT_STATS("Addba Req          : %d\n"
243 			"Addba Resp         : %d\n"
244 			"Addba Resp success : %d\n"
245 			"Addba Resp failed  : %d\n"
246 			"Delba Req received : %d\n"
247 			"Delba Tx success   : %d\n"
248 			"Delba Tx Fail      : %d\n"
249 			"BA window size     : %d\n"
250 			"Pn size            : %d\n",
251 			rx_tid->num_of_addba_req,
252 			rx_tid->num_of_addba_resp,
253 			rx_tid->num_addba_rsp_success,
254 			rx_tid->num_addba_rsp_failed,
255 			rx_tid->num_of_delba_req,
256 			rx_tid->delba_tx_success_cnt,
257 			rx_tid->delba_tx_fail_cnt,
258 			rx_tid->ba_win_size,
259 			rx_tid->pn_size);
260 }
261 
262 static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
263 				union hal_reo_status *reo_status)
264 {
265 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
266 
267 	if ((reo_status->rx_queue_status.header.status !=
268 		HAL_REO_CMD_SUCCESS) &&
269 		(reo_status->rx_queue_status.header.status !=
270 		HAL_REO_CMD_DRAIN)) {
271 		/* Should not happen normally. Just print error for now */
272 		dp_peer_err("%pK: Rx tid HW desc update failed(%d): tid %d",
273 			    soc, reo_status->rx_queue_status.header.status,
274 			    rx_tid->tid);
275 	}
276 }
277 
278 bool dp_get_peer_vdev_roaming_in_progress(struct dp_peer *peer)
279 {
280 	struct ol_if_ops *ol_ops = NULL;
281 	bool is_roaming = false;
282 	uint8_t vdev_id = -1;
283 	struct cdp_soc_t *soc;
284 
285 	if (!peer) {
286 		dp_peer_info("Peer is NULL. No roaming possible");
287 		return false;
288 	}
289 
290 	soc = dp_soc_to_cdp_soc_t(peer->vdev->pdev->soc);
291 	ol_ops = peer->vdev->pdev->soc->cdp_soc.ol_ops;
292 
293 	if (ol_ops && ol_ops->is_roam_inprogress) {
294 		dp_get_vdevid(soc, peer->mac_addr.raw, &vdev_id);
295 		is_roaming = ol_ops->is_roam_inprogress(vdev_id);
296 	}
297 
298 	dp_peer_info("peer: " QDF_MAC_ADDR_FMT ", vdev_id: %d, is_roaming: %d",
299 		     QDF_MAC_ADDR_REF(peer->mac_addr.raw), vdev_id, is_roaming);
300 
301 	return is_roaming;
302 }
303 
304 #ifdef WLAN_FEATURE_11BE_MLO
305 /**
306  * dp_rx_tid_setup_allow() - check if rx_tid and reo queue desc
307  *			     setup is necessary
308  * @peer: DP peer handle
309  *
310  * Return: true - allow, false - disallow
311  */
312 static inline
313 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
314 {
315 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
316 		return false;
317 
318 	return true;
319 }
320 
321 /**
322  * dp_rx_tid_update_allow() - check if rx_tid update needed
323  * @peer: DP peer handle
324  *
325  * Return: true - allow, false - disallow
326  */
327 static inline
328 bool dp_rx_tid_update_allow(struct dp_peer *peer)
329 {
330 	/* not as expected for MLO connection link peer */
331 	if (IS_MLO_DP_LINK_PEER(peer)) {
332 		QDF_BUG(0);
333 		return false;
334 	}
335 
336 	return true;
337 }
338 #else
339 static inline
340 bool dp_rx_tid_setup_allow(struct dp_peer *peer)
341 {
342 	return true;
343 }
344 
345 static inline
346 bool dp_rx_tid_update_allow(struct dp_peer *peer)
347 {
348 	return true;
349 }
350 #endif
351 
352 QDF_STATUS
353 dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t ba_window_size,
354 		       uint32_t start_seq, bool bar_update)
355 {
356 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
357 	struct dp_soc *soc = peer->vdev->pdev->soc;
358 	struct hal_reo_cmd_params params;
359 
360 	if (!dp_rx_tid_update_allow(peer)) {
361 		dp_peer_err("skip tid update for peer:" QDF_MAC_ADDR_FMT,
362 			    QDF_MAC_ADDR_REF(peer->mac_addr.raw));
363 		return QDF_STATUS_E_FAILURE;
364 	}
365 
366 	qdf_mem_zero(&params, sizeof(params));
367 
368 	params.std.need_status = 1;
369 	params.std.addr_lo = rx_tid->hw_qdesc_paddr & 0xffffffff;
370 	params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
371 	params.u.upd_queue_params.update_ba_window_size = 1;
372 	params.u.upd_queue_params.ba_window_size = ba_window_size;
373 
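	/* A start_seq at or above IEEE80211_SEQ_MAX means there is no valid SSN
	 * to program; clear the SSN-valid flag instead of updating the SSN.
	 */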
374 	if (start_seq < IEEE80211_SEQ_MAX) {
375 		params.u.upd_queue_params.update_ssn = 1;
376 		params.u.upd_queue_params.ssn = start_seq;
377 	} else {
378 		dp_set_ssn_valid_flag(&params, 0);
379 	}
380 
381 	if (dp_reo_send_cmd(soc, CMD_UPDATE_RX_REO_QUEUE, &params,
382 			    dp_rx_tid_update_cb, rx_tid)) {
383 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
384 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
385 	}
386 
387 	rx_tid->ba_win_size = ba_window_size;
388 
389 	if (dp_get_peer_vdev_roaming_in_progress(peer))
390 		return QDF_STATUS_E_PERM;
391 
392 	if (!bar_update)
393 		dp_peer_rx_reorder_queue_setup(soc, peer,
394 					       BIT(tid), ba_window_size);
395 
396 	return QDF_STATUS_SUCCESS;
397 }
398 
399 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
400 /**
401  * dp_reo_desc_defer_free_enqueue() - enqueue REO QDESC to be freed into
402  *                                    the deferred list
403  * @soc: Datapath soc handle
404  * @freedesc: REO DESC reference that needs to be freed
405  *
406  * Return: true if enqueued, else false
407  */
408 static bool dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
409 					   struct reo_desc_list_node *freedesc)
410 {
411 	struct reo_desc_deferred_freelist_node *desc;
412 
413 	if (!qdf_atomic_read(&soc->cmn_init_done))
414 		return false;
415 
416 	desc = qdf_mem_malloc(sizeof(*desc));
417 	if (!desc)
418 		return false;
419 
420 	desc->hw_qdesc_paddr = freedesc->rx_tid.hw_qdesc_paddr;
421 	desc->hw_qdesc_alloc_size = freedesc->rx_tid.hw_qdesc_alloc_size;
422 	desc->hw_qdesc_vaddr_unaligned =
423 			freedesc->rx_tid.hw_qdesc_vaddr_unaligned;
424 	desc->free_ts = qdf_get_system_timestamp();
425 	DP_RX_REO_QDESC_DEFERRED_GET_MAC(desc, freedesc);
426 
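	/* Re-check under the lock that the deferred freelist is still initialized;
	 * if it has already been deinitialized, drop the node instead of queuing it.
	 */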
427 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
428 	if (!soc->reo_desc_deferred_freelist_init) {
429 		qdf_mem_free(desc);
430 		qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
431 		return false;
432 	}
433 	qdf_list_insert_back(&soc->reo_desc_deferred_freelist,
434 			     (qdf_list_node_t *)desc);
435 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
436 
437 	return true;
438 }
439 
440 /**
441  * dp_reo_desc_defer_free() - free the REO QDESC in the deferred list
442  *                            based on time threshold
443  * @soc: Datapath soc handle
444  *
445  * Return: none
446  */
447 static void dp_reo_desc_defer_free(struct dp_soc *soc)
448 {
449 	struct reo_desc_deferred_freelist_node *desc;
450 	unsigned long curr_ts = qdf_get_system_timestamp();
451 
452 	qdf_spin_lock_bh(&soc->reo_desc_deferred_freelist_lock);
453 
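	/* Entries are appended in time order, so free from the head until the
	 * first entry that has not yet aged past REO_DESC_DEFERRED_FREE_MS.
	 */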
454 	while ((qdf_list_peek_front(&soc->reo_desc_deferred_freelist,
455 	       (qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
456 	       (curr_ts > (desc->free_ts + REO_DESC_DEFERRED_FREE_MS))) {
457 		qdf_list_remove_front(&soc->reo_desc_deferred_freelist,
458 				      (qdf_list_node_t **)&desc);
459 
460 		DP_RX_REO_QDESC_DEFERRED_FREE_EVT(desc);
461 
462 		qdf_mem_unmap_nbytes_single(soc->osdev,
463 					    desc->hw_qdesc_paddr,
464 					    QDF_DMA_BIDIRECTIONAL,
465 					    desc->hw_qdesc_alloc_size);
466 		qdf_mem_free(desc->hw_qdesc_vaddr_unaligned);
467 		qdf_mem_free(desc);
468 
469 		curr_ts = qdf_get_system_timestamp();
470 	}
471 
472 	qdf_spin_unlock_bh(&soc->reo_desc_deferred_freelist_lock);
473 }
474 #else
475 static inline bool
476 dp_reo_desc_defer_free_enqueue(struct dp_soc *soc,
477 			       struct reo_desc_list_node *freedesc)
478 {
479 	return false;
480 }
481 
482 static void dp_reo_desc_defer_free(struct dp_soc *soc)
483 {
484 }
485 #endif /* !WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
486 
487 void check_free_list_for_invalid_flush(struct dp_soc *soc)
488 {
489 	uint32_t i;
490 	uint32_t *addr_deref_val;
491 	unsigned long curr_ts = qdf_get_system_timestamp();
492 	uint32_t max_list_size;
493 
494 	max_list_size = soc->wlan_cfg_ctx->qref_control_size;
495 
496 	if (max_list_size == 0)
497 		return;
498 
499 	for (i = 0; i < soc->free_addr_list_idx; i++) {
500 		addr_deref_val = (uint32_t *)
501 			    soc->list_qdesc_addr_free[i].hw_qdesc_vaddr_unalign;
502 
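		/* These magic words are assumed to be debug patterns written into the
		 * queue descriptor once HW has flushed it back; record the first time
		 * such a pattern is observed for this entry.
		 */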
503 		if (*addr_deref_val == 0xDDBEEF84 ||
504 		    *addr_deref_val == 0xADBEEF84 ||
505 		    *addr_deref_val == 0xBDBEEF84 ||
506 		    *addr_deref_val == 0xCDBEEF84) {
507 			if (soc->list_qdesc_addr_free[i].ts_hw_flush_back == 0)
508 				soc->list_qdesc_addr_free[i].ts_hw_flush_back =
509 									curr_ts;
510 		}
511 	}
512 }
513 
514 /**
515  * dp_reo_desc_free() - Callback to free reo descriptor memory after
516  * HW cache flush
517  *
518  * @soc: DP SOC handle
519  * @cb_ctxt: Callback context
520  * @reo_status: REO command status
521  */
522 static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
523 			     union hal_reo_status *reo_status)
524 {
525 	struct reo_desc_list_node *freedesc =
526 		(struct reo_desc_list_node *)cb_ctxt;
527 	struct dp_rx_tid *rx_tid = &freedesc->rx_tid;
528 	unsigned long curr_ts = qdf_get_system_timestamp();
529 
530 	if ((reo_status->fl_cache_status.header.status !=
531 		HAL_REO_CMD_SUCCESS) &&
532 		(reo_status->fl_cache_status.header.status !=
533 		HAL_REO_CMD_DRAIN)) {
534 		dp_peer_err("%pK: Rx tid HW desc flush failed(%d): tid %d",
535 			    soc, reo_status->rx_queue_status.header.status,
536 			    freedesc->rx_tid.tid);
537 	}
538 	dp_peer_info("%pK: %lu hw_qdesc_paddr: %pK, tid:%d", soc,
539 		     curr_ts, (void *)(rx_tid->hw_qdesc_paddr),
540 		     rx_tid->tid);
541 
542 	/* The REO desc is enqueued to be freed at a later point
543 	 * in time; just free the freedesc alone and return.
544 	 */
545 	if (dp_reo_desc_defer_free_enqueue(soc, freedesc))
546 		goto out;
547 
548 	DP_RX_REO_QDESC_FREE_EVT(freedesc);
549 	add_entry_free_list(soc, rx_tid);
550 
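	/* Clear the REO shared queue-address cache and unmap the queue descriptor
	 * before the backing host memory is released below.
	 */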
551 	hal_reo_shared_qaddr_cache_clear(soc->hal_soc);
552 	qdf_mem_unmap_nbytes_single(soc->osdev,
553 				    rx_tid->hw_qdesc_paddr,
554 				    QDF_DMA_BIDIRECTIONAL,
555 				    rx_tid->hw_qdesc_alloc_size);
556 	check_free_list_for_invalid_flush(soc);
557 
558 	*(uint32_t *)rx_tid->hw_qdesc_vaddr_unaligned = 0;
559 	qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
560 out:
561 	qdf_mem_free(freedesc);
562 }
563 
564 #if defined(CONFIG_WIFI_EMULATION_WIFI_3_0) && defined(BUILD_X86)
565 /* Hawkeye emulation requires bus address to be >= 0x50000000 */
566 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
567 {
568 	if (dma_addr < 0x50000000)
569 		return QDF_STATUS_E_FAILURE;
570 	else
571 		return QDF_STATUS_SUCCESS;
572 }
573 #else
574 static inline int dp_reo_desc_addr_chk(qdf_dma_addr_t dma_addr)
575 {
576 	return QDF_STATUS_SUCCESS;
577 }
578 #endif
579 
580 static inline void
581 dp_rx_tid_setup_error_process(uint32_t tid_bitmap, struct dp_peer *peer)
582 {
583 	struct dp_rx_tid *rx_tid;
584 	int tid;
585 	struct dp_soc *soc = peer->vdev->pdev->soc;
586 
587 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
588 		if (!(BIT(tid) & tid_bitmap))
589 			continue;
590 
591 		rx_tid = &peer->rx_tid[tid];
592 		if (!rx_tid->hw_qdesc_vaddr_unaligned)
593 			continue;
594 
595 		if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) ==
596 		    QDF_STATUS_SUCCESS)
597 			qdf_mem_unmap_nbytes_single(
598 				soc->osdev,
599 				rx_tid->hw_qdesc_paddr,
600 				QDF_DMA_BIDIRECTIONAL,
601 				rx_tid->hw_qdesc_alloc_size);
602 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
603 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
604 		rx_tid->hw_qdesc_paddr = 0;
605 	}
606 }
607 
608 static QDF_STATUS
609 dp_single_rx_tid_setup(struct dp_peer *peer, int tid,
610 		       uint32_t ba_window_size, uint32_t start_seq)
611 {
612 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
613 	struct dp_vdev *vdev = peer->vdev;
614 	struct dp_soc *soc = vdev->pdev->soc;
615 	uint32_t hw_qdesc_size;
616 	uint32_t hw_qdesc_align;
617 	int hal_pn_type;
618 	void *hw_qdesc_vaddr;
619 	uint32_t alloc_tries = 0, ret;
620 	QDF_STATUS status = QDF_STATUS_SUCCESS;
621 	struct dp_txrx_peer *txrx_peer;
622 
623 	rx_tid->delba_tx_status = 0;
624 	rx_tid->ppdu_id_2k = 0;
625 	rx_tid->num_of_addba_req = 0;
626 	rx_tid->num_of_delba_req = 0;
627 	rx_tid->num_of_addba_resp = 0;
628 	rx_tid->num_addba_rsp_failed = 0;
629 	rx_tid->num_addba_rsp_success = 0;
630 	rx_tid->delba_tx_success_cnt = 0;
631 	rx_tid->delba_tx_fail_cnt = 0;
632 	rx_tid->statuscode = 0;
633 
634 	/* TODO: Allocating HW queue descriptors based on max BA window size
635 	 * for all QOS TIDs so that same descriptor can be used later when
636 	 * ADDBA request is received. This should be changed to allocate HW
637 	 * queue descriptors based on BA window size being negotiated (0 for
638 	 * non BA cases), and reallocate when BA window size changes and also
639 	 * send WMI message to FW to change the REO queue descriptor in Rx
640 	 * peer entry as part of dp_rx_tid_update.
641 	 */
642 	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
643 					       ba_window_size, tid);
644 
645 	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
646 	/* To avoid unnecessary extra allocation for alignment, try allocating
647 	 * the exact size and see if we already have an aligned address.
648 	 */
649 	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
650 
651 try_desc_alloc:
652 	rx_tid->hw_qdesc_vaddr_unaligned =
653 		qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size);
654 
655 	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
656 		dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
657 			    soc, tid);
658 		return QDF_STATUS_E_NOMEM;
659 	}
660 
661 	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
662 		hw_qdesc_align) {
663 		/* Address allocated above is not aligned. Allocate extra
664 		 * memory for alignment
665 		 */
666 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
667 		rx_tid->hw_qdesc_vaddr_unaligned =
668 			qdf_mem_malloc(rx_tid->hw_qdesc_alloc_size +
669 					hw_qdesc_align - 1);
670 
671 		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
672 			dp_peer_err("%pK: Rx tid HW desc alloc failed: tid %d",
673 				    soc, tid);
674 			return QDF_STATUS_E_NOMEM;
675 		}
676 
677 		hw_qdesc_vaddr = (void *)qdf_align((unsigned long)
678 			rx_tid->hw_qdesc_vaddr_unaligned,
679 			hw_qdesc_align);
680 
681 		dp_peer_debug("%pK: Total Size %d Aligned Addr %pK",
682 			      soc, rx_tid->hw_qdesc_alloc_size,
683 			      hw_qdesc_vaddr);
684 
685 	} else {
686 		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
687 	}
688 	rx_tid->hw_qdesc_vaddr_aligned = hw_qdesc_vaddr;
689 
690 	txrx_peer = dp_get_txrx_peer(peer);
691 
692 	/* TODO: Ensure that sec_type is set before ADDBA is received.
693 	 * Currently this is set based on htt indication
694 	 * HTT_T2H_MSG_TYPE_SEC_IND from target
695 	 */
696 	switch (txrx_peer->security[dp_sec_ucast].sec_type) {
697 	case cdp_sec_type_tkip_nomic:
698 	case cdp_sec_type_aes_ccmp:
699 	case cdp_sec_type_aes_ccmp_256:
700 	case cdp_sec_type_aes_gcmp:
701 	case cdp_sec_type_aes_gcmp_256:
702 		hal_pn_type = HAL_PN_WPA;
703 		break;
704 	case cdp_sec_type_wapi:
705 		if (vdev->opmode == wlan_op_mode_ap)
706 			hal_pn_type = HAL_PN_WAPI_EVEN;
707 		else
708 			hal_pn_type = HAL_PN_WAPI_UNEVEN;
709 		break;
710 	default:
711 		hal_pn_type = HAL_PN_NONE;
712 		break;
713 	}
714 
715 	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
716 		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type,
717 		vdev->vdev_stats_id);
718 
719 	ret = qdf_mem_map_nbytes_single(soc->osdev, hw_qdesc_vaddr,
720 					QDF_DMA_BIDIRECTIONAL,
721 					rx_tid->hw_qdesc_alloc_size,
722 					&rx_tid->hw_qdesc_paddr);
723 
724 	if (!ret)
725 		add_entry_alloc_list(soc, rx_tid, peer, hw_qdesc_vaddr);
726 
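	/* Retry the allocation (up to 10 attempts) if the DMA map failed or the
	 * bus address is unusable (see dp_reo_desc_addr_chk for the emulation
	 * address constraint).
	 */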
727 	if (dp_reo_desc_addr_chk(rx_tid->hw_qdesc_paddr) !=
728 			QDF_STATUS_SUCCESS || ret) {
729 		if (alloc_tries++ < 10) {
730 			qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
731 			rx_tid->hw_qdesc_vaddr_unaligned = NULL;
732 			goto try_desc_alloc;
733 		} else {
734 			dp_peer_err("%pK: Rx tid %d desc alloc fail (lowmem)",
735 				    soc, tid);
736 			status = QDF_STATUS_E_NOMEM;
737 			goto error;
738 		}
739 	}
740 
741 	return QDF_STATUS_SUCCESS;
742 
743 error:
744 	dp_rx_tid_setup_error_process(1 << tid, peer);
745 
746 	return status;
747 }
748 
749 QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer,
750 				 uint32_t tid_bitmap,
751 				 uint32_t ba_window_size,
752 				 uint32_t start_seq)
753 {
754 	QDF_STATUS status;
755 	int tid;
756 	struct dp_rx_tid *rx_tid;
757 	struct dp_vdev *vdev = peer->vdev;
758 	struct dp_soc *soc = vdev->pdev->soc;
759 	uint8_t setup_fail_cnt = 0;
760 
761 	if (!qdf_atomic_read(&peer->is_default_route_set))
762 		return QDF_STATUS_E_FAILURE;
763 
764 	if (!dp_rx_tid_setup_allow(peer)) {
765 		dp_peer_info("skip rx tid setup for peer " QDF_MAC_ADDR_FMT,
766 			     QDF_MAC_ADDR_REF(peer->mac_addr.raw));
767 		goto send_wmi_reo_cmd;
768 	}
769 
770 	dp_peer_info("tid_bitmap 0x%x, ba_window_size %d, start_seq %d",
771 		     tid_bitmap, ba_window_size, start_seq);
772 
773 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
774 		if (!(BIT(tid) & tid_bitmap))
775 			continue;
776 
777 		rx_tid = &peer->rx_tid[tid];
778 		rx_tid->ba_win_size = ba_window_size;
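		/* If a HW queue descriptor already exists for this tid, only update
		 * it; otherwise fall through and set the tid up from scratch.
		 */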
779 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
780 			status = dp_rx_tid_update_wifi3(peer, tid,
781 					ba_window_size, start_seq, false);
782 			if (QDF_IS_STATUS_ERROR(status)) {
783 				/* Not continue to update other tid(s) and
784 				/* Do not continue updating the other tid(s);
785 				 * return even though they have not been set up.
786 				 */
787 				return status;
788 			}
789 
790 			dp_peer_info("Update tid %d", tid);
791 			tid_bitmap &= ~BIT(tid);
792 			continue;
793 		}
794 
795 		status = dp_single_rx_tid_setup(peer, tid,
796 						ba_window_size, start_seq);
797 		if (QDF_IS_STATUS_ERROR(status)) {
798 			dp_peer_err("Set up tid %d fail, status=%d",
799 				    tid, status);
800 			tid_bitmap &= ~BIT(tid);
801 			setup_fail_cnt++;
802 			continue;
803 		}
804 	}
805 
806 	/* tid_bitmap == 0 means there is no tid(s) for further setup */
807 	if (!tid_bitmap) {
808 		dp_peer_info("tid_bitmap=0, no tid setup, setup_fail_cnt %d",
809 			     setup_fail_cnt);
810 
811 		/* If setup_fail_cnt == 0, all tid(s) have been
812 		 * successfully updated, so we return success.
813 		 */
814 		if (!setup_fail_cnt)
815 			return QDF_STATUS_SUCCESS;
816 		else
817 			return QDF_STATUS_E_FAILURE;
818 	}
819 
820 send_wmi_reo_cmd:
821 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
822 		status = QDF_STATUS_E_PERM;
823 		goto error;
824 	}
825 
826 	dp_peer_info("peer %pK, tids 0x%x, multi_reo %d, s_seq %d, w_size %d",
827 		      peer, tid_bitmap,
828 		      soc->features.multi_rx_reorder_q_setup_support,
829 		      start_seq, ba_window_size);
830 
831 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
832 						tid_bitmap,
833 						ba_window_size);
834 	if (QDF_IS_STATUS_SUCCESS(status))
835 		return status;
836 
837 error:
838 	dp_rx_tid_setup_error_process(tid_bitmap, peer);
839 
840 	return status;
841 }
842 
843 #ifdef DP_UMAC_HW_RESET_SUPPORT
844 static
845 void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
846 {
847 	int tid;
848 
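	/* Reset any REO TID queue that has a HW queue descriptor allocated. */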
849 	for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
850 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
851 		void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;
852 
853 		if (vaddr)
854 			dp_reset_rx_reo_tid_queue(soc, vaddr,
855 						  rx_tid->hw_qdesc_alloc_size);
856 	}
857 }
858 
859 void dp_reset_tid_q_setup(struct dp_soc *soc)
860 {
861 	dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
862 }
863 #endif
864 #ifdef REO_DESC_DEFER_FREE
865 /**
866  * dp_reo_desc_clean_up() - If the cmd to flush the base desc fails, add the
867  * desc back to the freelist and defer the deletion
868  *
869  * @soc: DP SOC handle
870  * @desc: Base descriptor to be freed
871  * @reo_status: REO command status
872  */
873 static void dp_reo_desc_clean_up(struct dp_soc *soc,
874 				 struct reo_desc_list_node *desc,
875 				 union hal_reo_status *reo_status)
876 {
877 	desc->free_ts = qdf_get_system_timestamp();
878 	DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
879 	qdf_list_insert_back(&soc->reo_desc_freelist,
880 			     (qdf_list_node_t *)desc);
881 }
882 
883 /**
884  * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to the
885  * cmd ring to avoid a REO hang
886  *
887  * @list_size: REO desc list size to be cleaned
888  */
889 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
890 {
891 	unsigned long curr_ts = qdf_get_system_timestamp();
892 
893 	if ((*list_size) > REO_DESC_FREELIST_SIZE) {
894 		dp_err_log("%lu:freedesc number %d in freelist",
895 			   curr_ts, *list_size);
896 		/* limit the batch queue size */
897 		*list_size = REO_DESC_FREELIST_SIZE;
898 	}
899 }
900 #else
901 /**
902  * dp_reo_desc_clean_up() - If sending the cmd to REO in order to flush the
903  * cache fails, free the base REO desc anyway
904  *
905  * @soc: DP SOC handle
906  * @desc: Base descriptor to be freed
907  * @reo_status: REO command status
908  */
909 static void dp_reo_desc_clean_up(struct dp_soc *soc,
910 				 struct reo_desc_list_node *desc,
911 				 union hal_reo_status *reo_status)
912 {
913 	if (reo_status) {
914 		qdf_mem_zero(reo_status, sizeof(*reo_status));
915 		reo_status->fl_cache_status.header.status = 0;
916 		dp_reo_desc_free(soc, (void *)desc, reo_status);
917 	}
918 }
919 
920 /**
921  * dp_reo_limit_clean_batch_sz() - Limit the number of REO CMDs queued to the
922  * cmd ring to avoid a REO hang
923  *
924  * @list_size: REO desc list size to be cleaned
925  */
926 static inline void dp_reo_limit_clean_batch_sz(uint32_t *list_size)
927 {
928 }
929 #endif
930 
931 /**
932  * dp_resend_update_reo_cmd() - Resend the UPDATE_REO_QUEUE
933  * cmd and re-insert desc into free list if send fails.
934  *
935  * @soc: DP SOC handle
936  * @desc: desc with resend update cmd flag set
937  * @rx_tid: Desc RX tid associated with update cmd for resetting
938  * valid field to 0 in h/w
939  *
940  * Return: QDF status
941  */
942 static QDF_STATUS
943 dp_resend_update_reo_cmd(struct dp_soc *soc,
944 			 struct reo_desc_list_node *desc,
945 			 struct dp_rx_tid *rx_tid)
946 {
947 	struct hal_reo_cmd_params params;
948 
949 	qdf_mem_zero(&params, sizeof(params));
950 	params.std.need_status = 1;
951 	params.std.addr_lo =
952 		rx_tid->hw_qdesc_paddr & 0xffffffff;
953 	params.std.addr_hi =
954 		(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
955 	params.u.upd_queue_params.update_vld = 1;
956 	params.u.upd_queue_params.vld = 0;
957 	desc->resend_update_reo_cmd = false;
958 	/*
959 	 * If the cmd send fails then set resend_update_reo_cmd flag
960 	 * and insert the desc at the end of the free list to retry.
961 	 */
962 	if (dp_reo_send_cmd(soc,
963 			    CMD_UPDATE_RX_REO_QUEUE,
964 			    &params,
965 			    dp_rx_tid_delete_cb,
966 			    (void *)desc)
967 	    != QDF_STATUS_SUCCESS) {
968 		desc->resend_update_reo_cmd = true;
969 		desc->free_ts = qdf_get_system_timestamp();
970 		qdf_list_insert_back(&soc->reo_desc_freelist,
971 				     (qdf_list_node_t *)desc);
972 		dp_err_log("failed to send reo cmd CMD_UPDATE_RX_REO_QUEUE");
973 		DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
974 		return QDF_STATUS_E_FAILURE;
975 	}
976 
977 	return QDF_STATUS_SUCCESS;
978 }
979 
980 void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
981 			 union hal_reo_status *reo_status)
982 {
983 	struct reo_desc_list_node *freedesc =
984 		(struct reo_desc_list_node *)cb_ctxt;
985 	uint32_t list_size;
986 	struct reo_desc_list_node *desc = NULL;
987 	unsigned long curr_ts = qdf_get_system_timestamp();
988 	uint32_t desc_size, tot_desc_size;
989 	struct hal_reo_cmd_params params;
990 	bool flush_failure = false;
991 
992 	DP_RX_REO_QDESC_UPDATE_EVT(freedesc);
993 
994 	if (reo_status->rx_queue_status.header.status == HAL_REO_CMD_DRAIN) {
995 		qdf_mem_zero(reo_status, sizeof(*reo_status));
996 		reo_status->fl_cache_status.header.status = HAL_REO_CMD_DRAIN;
997 		dp_reo_desc_free(soc, (void *)freedesc, reo_status);
998 		DP_STATS_INC(soc, rx.err.reo_cmd_send_drain, 1);
999 		return;
1000 	} else if (reo_status->rx_queue_status.header.status !=
1001 		HAL_REO_CMD_SUCCESS) {
1002 		/* Should not happen normally. Just print error for now */
1003 		dp_info_rl("Rx tid HW desc deletion failed(%d): tid %d",
1004 			   reo_status->rx_queue_status.header.status,
1005 			   freedesc->rx_tid.tid);
1006 	}
1007 
1008 	dp_peer_info("%pK: rx_tid: %d status: %d",
1009 		     soc, freedesc->rx_tid.tid,
1010 		     reo_status->rx_queue_status.header.status);
1011 
1012 	qdf_spin_lock_bh(&soc->reo_desc_freelist_lock);
1013 	freedesc->free_ts = curr_ts;
1014 	qdf_list_insert_back_size(&soc->reo_desc_freelist,
1015 				  (qdf_list_node_t *)freedesc, &list_size);
1016 
1017 	/* The MCL path adds the desc back to reo_desc_freelist when a REO FLUSH
1018 	 * fails. The number of REO queues pending in the free list may then grow
1019 	 * larger than the REO_CMD_RING max size, flooding the REO CMD ring and
1020 	 * leaving the REO HW in an unexpected condition. So the number of REO
1021 	 * cmds issued in a batch operation needs to be limited.
1022 	 */
1023 	dp_reo_limit_clean_batch_sz(&list_size);
1024 
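	/* Drain descriptors when the freelist is oversized, when the head entry
	 * has aged past REO_DESC_FREE_DEFER_MS, or when an entry is queued for a
	 * resend of the update command.
	 */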
1025 	while ((qdf_list_peek_front(&soc->reo_desc_freelist,
1026 		(qdf_list_node_t **)&desc) == QDF_STATUS_SUCCESS) &&
1027 		((list_size >= REO_DESC_FREELIST_SIZE) ||
1028 		(curr_ts > (desc->free_ts + REO_DESC_FREE_DEFER_MS)) ||
1029 		(desc->resend_update_reo_cmd && list_size))) {
1030 		struct dp_rx_tid *rx_tid;
1031 
1032 		qdf_list_remove_front(&soc->reo_desc_freelist,
1033 				      (qdf_list_node_t **)&desc);
1034 		list_size--;
1035 		rx_tid = &desc->rx_tid;
1036 
1037 		/* First process descs with resend_update_reo_cmd set */
1038 		if (desc->resend_update_reo_cmd) {
1039 			if (dp_resend_update_reo_cmd(soc, desc, rx_tid) !=
1040 			    QDF_STATUS_SUCCESS)
1041 				break;
1042 			else
1043 				continue;
1044 		}
1045 
1046 		/* Flush and invalidate REO descriptor from HW cache: Base and
1047 		 * extension descriptors should be flushed separately
1048 		 */
1049 		if (desc->pending_ext_desc_size)
1050 			tot_desc_size = desc->pending_ext_desc_size;
1051 		else
1052 			tot_desc_size = rx_tid->hw_qdesc_alloc_size;
1053 		/* Get base descriptor size by passing non-qos TID */
1054 		desc_size = hal_get_reo_qdesc_size(soc->hal_soc, 0,
1055 						   DP_NON_QOS_TID);
1056 
1057 		/* Flush reo extension descriptors */
1058 		while ((tot_desc_size -= desc_size) > 0) {
1059 			qdf_mem_zero(&params, sizeof(params));
1060 			params.std.addr_lo =
1061 				((uint64_t)(rx_tid->hw_qdesc_paddr) +
1062 				tot_desc_size) & 0xffffffff;
1063 			params.std.addr_hi =
1064 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1065 
1066 			if (QDF_STATUS_SUCCESS !=
1067 			    dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params,
1068 					    NULL, NULL)) {
1069 				dp_info_rl("fail to send CMD_CACHE_FLUSH:"
1070 					   "tid %d desc %pK", rx_tid->tid,
1071 					   (void *)(rx_tid->hw_qdesc_paddr));
1072 				desc->pending_ext_desc_size = tot_desc_size +
1073 								      desc_size;
1074 				dp_reo_desc_clean_up(soc, desc, reo_status);
1075 				flush_failure = true;
1076 				break;
1077 			}
1078 		}
1079 
1080 		if (flush_failure)
1081 			break;
1082 
1083 		desc->pending_ext_desc_size = desc_size;
1084 
1085 		/* Flush base descriptor */
1086 		qdf_mem_zero(&params, sizeof(params));
1087 		params.std.need_status = 1;
1088 		params.std.addr_lo =
1089 			(uint64_t)(rx_tid->hw_qdesc_paddr) & 0xffffffff;
1090 		params.std.addr_hi = (uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1091 		if (rx_tid->ba_win_size > 256)
1092 			params.u.fl_cache_params.flush_q_1k_desc = 1;
1093 		params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
1094 
1095 		if (QDF_STATUS_SUCCESS != dp_reo_send_cmd(soc,
1096 							  CMD_FLUSH_CACHE,
1097 							  &params,
1098 							  dp_reo_desc_free,
1099 							  (void *)desc)) {
1100 			union hal_reo_status reo_status;
1101 			/*
1102 			 * If dp_reo_send_cmd returns failure, the related TID queue
1103 			 * desc should be unmapped. The local reo_desc, together with
1104 			 * the TID queue desc, also needs to be freed accordingly.
1105 			 *
1106 			 * Here invoke the desc_free function directly to do the clean up.
1107 			 *
1108 			 * In the case of the MCL path, add the desc back to the free
1109 			 * desc list and defer deletion.
1110 			 */
1111 			dp_info_rl("fail to send REO cmd to flush cache: tid %d",
1112 				   rx_tid->tid);
1113 			dp_reo_desc_clean_up(soc, desc, &reo_status);
1114 			DP_STATS_INC(soc, rx.err.reo_cmd_send_fail, 1);
1115 			break;
1116 		}
1117 	}
1118 	qdf_spin_unlock_bh(&soc->reo_desc_freelist_lock);
1119 
1120 	dp_reo_desc_defer_free(soc);
1121 }
1122 
1123 /**
1124  * dp_rx_tid_delete_wifi3() - Delete receive TID queue
1125  * @peer: Datapath peer handle
1126  * @tid: TID
1127  *
1128  * Return: 0 on success, error code on failure
1129  */
1130 static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
1131 {
1132 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1133 	struct dp_soc *soc = peer->vdev->pdev->soc;
1134 	union hal_reo_status reo_status;
1135 	struct hal_reo_cmd_params params;
1136 	struct reo_desc_list_node *freedesc =
1137 		qdf_mem_malloc(sizeof(*freedesc));
1138 
1139 	if (!freedesc) {
1140 		dp_peer_err("%pK: malloc failed for freedesc: tid %d",
1141 			    soc, tid);
1142 		qdf_assert(0);
1143 		return -ENOMEM;
1144 	}
1145 
1146 	freedesc->rx_tid = *rx_tid;
1147 	freedesc->resend_update_reo_cmd = false;
1148 
1149 	qdf_mem_zero(&params, sizeof(params));
1150 
1151 	DP_RX_REO_QDESC_GET_MAC(freedesc, peer);
1152 
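	/* Call the delete callback directly with a synthesized SUCCESS status so
	 * this descriptor enters the normal flush-and-free path.
	 */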
1153 	reo_status.rx_queue_status.header.status = HAL_REO_CMD_SUCCESS;
1154 	dp_rx_tid_delete_cb(soc, freedesc, &reo_status);
1155 
1156 	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
1157 	rx_tid->hw_qdesc_alloc_size = 0;
1158 	rx_tid->hw_qdesc_paddr = 0;
1159 
1160 	return 0;
1161 }
1162 
1163 #ifdef DP_LFR
1164 static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
1165 {
1166 	int tid;
1167 	uint32_t tid_bitmap = 0;
1168 
1169 	for (tid = 1; tid < DP_MAX_TIDS-1; tid++)
1170 		tid_bitmap |= BIT(tid);
1171 
1172 	dp_peer_info("Set up tid_bitmap 0x%x for peer %pK peer->local_id %d",
1173 		     tid_bitmap, peer, peer->local_id);
1174 	dp_rx_tid_setup_wifi3(peer, tid_bitmap, 1, 0);
1175 }
1176 
1177 #else
1178 static void dp_peer_setup_remaining_tids(struct dp_peer *peer) {};
1179 #endif
1180 
1181 #ifdef WLAN_FEATURE_11BE_MLO
1182 /**
1183  * dp_peer_rx_tids_init() - initialize each tids in peer
1184  * @peer: peer pointer
1185  *
1186  * Return: None
1187  */
1188 static void dp_peer_rx_tids_init(struct dp_peer *peer)
1189 {
1190 	int tid;
1191 	struct dp_rx_tid *rx_tid;
1192 	struct dp_rx_tid_defrag *rx_tid_defrag;
1193 
1194 	if (!IS_MLO_DP_LINK_PEER(peer)) {
1195 		for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1196 			rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
1197 
1198 			rx_tid_defrag->array = &rx_tid_defrag->base;
1199 			rx_tid_defrag->defrag_timeout_ms = 0;
1200 			rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
1201 			rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
1202 			rx_tid_defrag->base.head = NULL;
1203 			rx_tid_defrag->base.tail = NULL;
1204 			rx_tid_defrag->tid = tid;
1205 			rx_tid_defrag->defrag_peer = peer->txrx_peer;
1206 		}
1207 	}
1208 
1209 	/* If this is not the first assoc link peer,
1210 	 * do not initialize the rx_tids again.
1211 	 */
1212 	if (IS_MLO_DP_LINK_PEER(peer) && !peer->first_link)
1213 		return;
1214 
1215 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1216 		rx_tid = &peer->rx_tid[tid];
1217 		rx_tid->tid = tid;
1218 		rx_tid->ba_win_size = 0;
1219 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1220 	}
1221 }
1222 #else
1223 static void dp_peer_rx_tids_init(struct dp_peer *peer)
1224 {
1225 	int tid;
1226 	struct dp_rx_tid *rx_tid;
1227 	struct dp_rx_tid_defrag *rx_tid_defrag;
1228 
1229 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1230 		rx_tid = &peer->rx_tid[tid];
1231 
1232 		rx_tid_defrag = &peer->txrx_peer->rx_tid[tid];
1233 		rx_tid->tid = tid;
1234 		rx_tid->ba_win_size = 0;
1235 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1236 
1237 		rx_tid_defrag->base.head = NULL;
1238 		rx_tid_defrag->base.tail = NULL;
1239 		rx_tid_defrag->tid = tid;
1240 		rx_tid_defrag->array = &rx_tid_defrag->base;
1241 		rx_tid_defrag->defrag_timeout_ms = 0;
1242 		rx_tid_defrag->defrag_waitlist_elem.tqe_next = NULL;
1243 		rx_tid_defrag->defrag_waitlist_elem.tqe_prev = NULL;
1244 		rx_tid_defrag->defrag_peer = peer->txrx_peer;
1245 	}
1246 }
1247 #endif
1248 
1249 void dp_peer_rx_tid_setup(struct dp_peer *peer)
1250 {
1251 	struct dp_soc *soc = peer->vdev->pdev->soc;
1252 	struct dp_txrx_peer *txrx_peer = dp_get_txrx_peer(peer);
1253 	struct dp_vdev *vdev = peer->vdev;
1254 
1255 	dp_peer_rx_tids_init(peer);
1256 
1257 	/* Setup default (non-qos) rx tid queue */
1258 	dp_rx_tid_setup_wifi3(peer, BIT(DP_NON_QOS_TID), 1, 0);
1259 
1260 	/* Set up the rx tid queue for TID 0.
1261 	 * Other queues will be set up on receiving the first packet, which would
1262 	 * cause a NULL REO queue error. For a mesh peer, if the mesh peer is not
1263 	 * deleted on one of the mesh APs, the new addition of the mesh peer on
1264 	 * another mesh AP does not do BA negotiation, leading to a mismatch in
1265 	 * BA windows. To avoid this, send the max BA window during init.
1266 	 */
1267 	if (qdf_unlikely(vdev->mesh_vdev) ||
1268 	    qdf_unlikely(txrx_peer->nawds_enabled))
1269 		dp_rx_tid_setup_wifi3(
1270 				peer, BIT(0),
1271 				hal_get_rx_max_ba_window(soc->hal_soc, 0),
1272 				0);
1273 	else
1274 		dp_rx_tid_setup_wifi3(peer, BIT(0), 1, 0);
1275 
1276 	/*
1277 	 * Setup the rest of TID's to handle LFR
1278 	 */
1279 	dp_peer_setup_remaining_tids(peer);
1280 }
1281 
1282 void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
1283 {
1284 	int tid;
1285 	uint32_t tid_delete_mask = 0;
1286 
1287 	if (!peer->txrx_peer)
1288 		return;
1289 
1290 	dp_info("Remove tids for peer: %pK", peer);
1291 
1292 	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
1293 		struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
1294 		struct dp_rx_tid_defrag *defrag_rx_tid =
1295 				&peer->txrx_peer->rx_tid[tid];
1296 
1297 		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
1298 		if (!peer->bss_peer || peer->vdev->opmode == wlan_op_mode_sta) {
1299 			/* Cleanup defrag related resource */
1300 			dp_rx_defrag_waitlist_remove(peer->txrx_peer, tid);
1301 			dp_rx_reorder_flush_frag(peer->txrx_peer, tid);
1302 		}
1303 		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
1304 
1305 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1306 		if (peer->rx_tid[tid].hw_qdesc_vaddr_unaligned) {
1307 			dp_rx_tid_delete_wifi3(peer, tid);
1308 
1309 			tid_delete_mask |= (1 << tid);
1310 		}
1311 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1312 	}
1313 #ifdef notyet /* See if FW can remove queues as part of peer cleanup */
1314 	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
1315 		soc->ol_ops->peer_rx_reorder_queue_remove(soc->ctrl_psoc,
1316 			peer->vdev->pdev->pdev_id,
1317 			peer->vdev->vdev_id, peer->mac_addr.raw,
1318 			tid_delete_mask);
1319 	}
1320 #endif
1321 }
1322 
1323 /**
1324  * dp_teardown_256_ba_sessions() - Teardown sessions using 256
1325  *                                window size when a request with
1326  *                                64 window size is received.
1327  *                                This is done as a WAR since HW can
1328  *                                have only one setting per peer (64 or 256).
1329  *                                For HKv2, we use the per-tid buffersize
1330  *                                setting for tids 0 to per_tid_basize_max_tid.
1331  *                                For tids greater than per_tid_basize_max_tid
1332  *                                we use the HKv1 method.
1333  * @peer: Datapath peer
1334  *
1335  * Return: void
1336  */
1337 static void dp_teardown_256_ba_sessions(struct dp_peer *peer)
1338 {
1339 	uint8_t delba_rcode = 0;
1340 	int tid;
1341 	struct dp_rx_tid *rx_tid = NULL;
1342 
1343 	tid = peer->vdev->pdev->soc->per_tid_basize_max_tid;
1344 	for (; tid < DP_MAX_TIDS; tid++) {
1345 		rx_tid = &peer->rx_tid[tid];
1346 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1347 
1348 		if (rx_tid->ba_win_size <= 64) {
1349 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1350 			continue;
1351 		} else {
1352 			if (rx_tid->ba_status == DP_RX_BA_ACTIVE ||
1353 			    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1354 				/* send delba */
1355 				if (!rx_tid->delba_tx_status) {
1356 					rx_tid->delba_tx_retry++;
1357 					rx_tid->delba_tx_status = 1;
1358 					rx_tid->delba_rcode =
1359 					IEEE80211_REASON_QOS_SETUP_REQUIRED;
1360 					delba_rcode = rx_tid->delba_rcode;
1361 
1362 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1363 					if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
1364 						peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1365 							peer->vdev->pdev->soc->ctrl_psoc,
1366 							peer->vdev->vdev_id,
1367 							peer->mac_addr.raw,
1368 							tid, delba_rcode,
1369 							CDP_DELBA_REASON_NONE);
1370 				} else {
1371 					qdf_spin_unlock_bh(&rx_tid->tid_lock);
1372 				}
1373 			} else {
1374 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
1375 			}
1376 		}
1377 	}
1378 }
1379 
1380 int dp_addba_resp_tx_completion_wifi3(struct cdp_soc_t *cdp_soc,
1381 				      uint8_t *peer_mac,
1382 				      uint16_t vdev_id,
1383 				      uint8_t tid, int status)
1384 {
1385 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1386 					(struct dp_soc *)cdp_soc,
1387 					peer_mac, 0, vdev_id,
1388 					DP_MOD_ID_CDP);
1389 	struct dp_rx_tid *rx_tid = NULL;
1390 
1391 	if (!peer) {
1392 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1393 		goto fail;
1394 	}
1395 	rx_tid = &peer->rx_tid[tid];
1396 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1397 	if (status) {
1398 		rx_tid->num_addba_rsp_failed++;
1399 		if (rx_tid->hw_qdesc_vaddr_unaligned)
1400 			dp_rx_tid_update_wifi3(peer, tid, 1,
1401 					       IEEE80211_SEQ_MAX, false);
1402 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1403 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1404 		dp_err("RxTid- %d addba rsp tx completion failed", tid);
1405 
1406 		goto success;
1407 	}
1408 
1409 	rx_tid->num_addba_rsp_success++;
1410 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE) {
1411 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1412 		dp_peer_err("%pK: Rx Tid- %d hw qdesc is not in IN_PROGRESS",
1413 			    cdp_soc, tid);
1414 		goto fail;
1415 	}
1416 
1417 	if (!qdf_atomic_read(&peer->is_default_route_set)) {
1418 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1419 		dp_peer_debug("%pK: default route is not set for peer: " QDF_MAC_ADDR_FMT,
1420 			      cdp_soc, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
1421 		goto fail;
1422 	}
1423 
1424 	if (dp_rx_tid_update_wifi3(peer, tid,
1425 				   rx_tid->ba_win_size,
1426 				   rx_tid->startseqnum,
1427 				   false)) {
1428 		dp_err("Failed update REO SSN");
1429 	}
1430 
1431 	dp_info("tid %u window_size %u start_seq_num %u",
1432 		tid, rx_tid->ba_win_size,
1433 		rx_tid->startseqnum);
1434 
1435 	/* First Session */
1436 	if (peer->active_ba_session_cnt == 0) {
1437 		if (rx_tid->ba_win_size > 64 && rx_tid->ba_win_size <= 256)
1438 			peer->hw_buffer_size = 256;
1439 		else if (rx_tid->ba_win_size <= 1024 &&
1440 			 rx_tid->ba_win_size > 256)
1441 			peer->hw_buffer_size = 1024;
1442 		else
1443 			peer->hw_buffer_size = 64;
1444 	}
1445 
1446 	rx_tid->ba_status = DP_RX_BA_ACTIVE;
1447 
1448 	peer->active_ba_session_cnt++;
1449 
1450 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1451 
1452 	/* Kill any session having 256 buffer size
1453 	 * when 64 buffer size request is received.
1454 	 * Also, latch on to 64 as new buffer size.
1455 	 */
1456 	if (peer->kill_256_sessions) {
1457 		dp_teardown_256_ba_sessions(peer);
1458 		peer->kill_256_sessions = 0;
1459 	}
1460 
1461 success:
1462 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1463 	return QDF_STATUS_SUCCESS;
1464 
1465 fail:
1466 	if (peer)
1467 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1468 
1469 	return QDF_STATUS_E_FAILURE;
1470 }
1471 
1472 QDF_STATUS
1473 dp_addba_responsesetup_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1474 			     uint16_t vdev_id, uint8_t tid,
1475 			     uint8_t *dialogtoken, uint16_t *statuscode,
1476 			     uint16_t *buffersize, uint16_t *batimeout)
1477 {
1478 	struct dp_rx_tid *rx_tid = NULL;
1479 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1480 	struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)cdp_soc,
1481 						       peer_mac, 0, vdev_id,
1482 						       DP_MOD_ID_CDP);
1483 
1484 	if (!peer) {
1485 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1486 		return QDF_STATUS_E_FAILURE;
1487 	}
1488 	rx_tid = &peer->rx_tid[tid];
1489 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1490 	rx_tid->num_of_addba_resp++;
1491 	/* setup ADDBA response parameters */
1492 	*dialogtoken = rx_tid->dialogtoken;
1493 	*statuscode = rx_tid->statuscode;
1494 	*buffersize = rx_tid->ba_win_size;
1495 	*batimeout  = 0;
1496 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1497 
1498 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1499 
1500 	return status;
1501 }
1502 
1503 /**
1504  * dp_check_ba_buffersize() - Check buffer size in request
1505  *                            and latch onto this size based on
1506  *                            size used in first active session.
1507  * @peer: Datapath peer
1508  * @tid: Tid
1509  * @buffersize: Block ack window size
1510  *
1511  * Return: void
1512  */
1513 static void dp_check_ba_buffersize(struct dp_peer *peer,
1514 				   uint16_t tid,
1515 				   uint16_t buffersize)
1516 {
1517 	struct dp_rx_tid *rx_tid = NULL;
1518 	struct dp_soc *soc = peer->vdev->pdev->soc;
1519 	uint16_t max_ba_window;
1520 
1521 	max_ba_window = hal_get_rx_max_ba_window(soc->hal_soc, tid);
1522 	dp_info("Input buffersize %d, max dp allowed %d",
1523 		buffersize, max_ba_window);
1524 	/* Adjust BA window size, restrict it to max DP allowed */
1525 	buffersize = QDF_MIN(buffersize, max_ba_window);
1526 
1527 	dp_info(QDF_MAC_ADDR_FMT" per_tid_basize_max_tid %d tid %d buffersize %d hw_buffer_size %d",
1528 		QDF_MAC_ADDR_REF(peer->mac_addr.raw),
1529 		soc->per_tid_basize_max_tid, tid, buffersize,
1530 		peer->hw_buffer_size);
1531 
1532 	rx_tid = &peer->rx_tid[tid];
1533 	if (soc->per_tid_basize_max_tid &&
1534 	    tid < soc->per_tid_basize_max_tid) {
1535 		rx_tid->ba_win_size = buffersize;
1536 		goto out;
1537 	} else {
1538 		if (peer->active_ba_session_cnt == 0) {
1539 			rx_tid->ba_win_size = buffersize;
1540 		} else {
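			/* With an active session the peer is already latched to a HW
			 * buffer size of 64 or 256; a request of 64 or less against a
			 * 256 latch marks the existing 256 sessions for teardown.
			 */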
1541 			if (peer->hw_buffer_size == 64) {
1542 				if (buffersize <= 64)
1543 					rx_tid->ba_win_size = buffersize;
1544 				else
1545 					rx_tid->ba_win_size = peer->hw_buffer_size;
1546 			} else if (peer->hw_buffer_size == 256) {
1547 				if (buffersize > 64) {
1548 					rx_tid->ba_win_size = buffersize;
1549 				} else {
1550 					rx_tid->ba_win_size = buffersize;
1551 					peer->hw_buffer_size = 64;
1552 					peer->kill_256_sessions = 1;
1553 				}
1554 			} else if (buffersize <= 1024) {
1555 				/*
1556 				 * Above checks are only for HK V2
1557 				 * Set incoming buffer size for others
1558 				 */
1559 				rx_tid->ba_win_size = buffersize;
1560 			} else {
1561 				dp_err("Invalid buffer size %d", buffersize);
1562 				qdf_assert_always(0);
1563 			}
1564 		}
1565 	}
1566 
1567 out:
1568 	dp_info("rx_tid->ba_win_size %d peer->hw_buffer_size %d peer->kill_256_sessions %d",
1569 		rx_tid->ba_win_size,
1570 		peer->hw_buffer_size,
1571 		peer->kill_256_sessions);
1572 }
1573 
1574 QDF_STATUS dp_rx_tid_update_ba_win_size(struct cdp_soc_t *cdp_soc,
1575 					uint8_t *peer_mac, uint16_t vdev_id,
1576 					uint8_t tid, uint16_t buffersize)
1577 {
1578 	struct dp_rx_tid *rx_tid = NULL;
1579 	struct dp_peer *peer;
1580 
1581 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
1582 					      peer_mac, 0, vdev_id,
1583 					      DP_MOD_ID_CDP);
1584 	if (!peer) {
1585 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1586 		return QDF_STATUS_E_FAILURE;
1587 	}
1588 
1589 	rx_tid = &peer->rx_tid[tid];
1590 
1591 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1592 	rx_tid->ba_win_size = buffersize;
1593 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1594 
1595 	dp_info("peer "QDF_MAC_ADDR_FMT", tid %d, update BA win size to %d",
1596 		QDF_MAC_ADDR_REF(peer->mac_addr.raw), tid, buffersize);
1597 
1598 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1599 
1600 	return QDF_STATUS_SUCCESS;
1601 }
1602 
1603 #define DP_RX_BA_SESSION_DISABLE  1
1604 
1605 int dp_addba_requestprocess_wifi3(struct cdp_soc_t *cdp_soc,
1606 				  uint8_t *peer_mac,
1607 				  uint16_t vdev_id,
1608 				  uint8_t dialogtoken,
1609 				  uint16_t tid, uint16_t batimeout,
1610 				  uint16_t buffersize,
1611 				  uint16_t startseqnum)
1612 {
1613 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1614 	struct dp_rx_tid *rx_tid = NULL;
1615 	struct dp_peer *peer;
1616 
1617 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)cdp_soc,
1618 					      peer_mac,
1619 					      0, vdev_id,
1620 					      DP_MOD_ID_CDP);
1621 
1622 	if (!peer) {
1623 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1624 		return QDF_STATUS_E_FAILURE;
1625 	}
1626 	rx_tid = &peer->rx_tid[tid];
1627 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1628 	rx_tid->num_of_addba_req++;
1629 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE &&
1630 	    rx_tid->hw_qdesc_vaddr_unaligned) {
1631 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1632 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1633 		peer->active_ba_session_cnt--;
1634 		dp_peer_debug("%pK: Rx Tid- %d hw qdesc is already setup",
1635 			      cdp_soc, tid);
1636 	}
1637 
1638 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1639 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1640 		status = QDF_STATUS_E_FAILURE;
1641 		goto fail;
1642 	}
1643 
1644 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE) {
1645 		dp_peer_info("%pK: disable BA session",
1646 			     cdp_soc);
1647 
1648 		buffersize = 1;
1649 	} else if (rx_tid->rx_ba_win_size_override) {
1650 		dp_peer_info("%pK: override BA win to %d", cdp_soc,
1651 			     rx_tid->rx_ba_win_size_override);
1652 
1653 		buffersize = rx_tid->rx_ba_win_size_override;
1654 	} else {
1655 		dp_peer_info("%pK: restore BA win %d based on addba req", cdp_soc,
1656 			     buffersize);
1657 	}
1658 
1659 	dp_check_ba_buffersize(peer, tid, buffersize);
1660 
1661 	if (dp_rx_tid_setup_wifi3(peer, BIT(tid),
1662 	    rx_tid->ba_win_size, startseqnum)) {
1663 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1664 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1665 		status = QDF_STATUS_E_FAILURE;
1666 		goto fail;
1667 	}
1668 	rx_tid->ba_status = DP_RX_BA_IN_PROGRESS;
1669 
1670 	rx_tid->dialogtoken = dialogtoken;
1671 	rx_tid->startseqnum = startseqnum;
1672 
1673 	if (rx_tid->userstatuscode != IEEE80211_STATUS_SUCCESS)
1674 		rx_tid->statuscode = rx_tid->userstatuscode;
1675 	else
1676 		rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
1677 
1678 	if (rx_tid->rx_ba_win_size_override == DP_RX_BA_SESSION_DISABLE)
1679 		rx_tid->statuscode = IEEE80211_STATUS_REFUSED;
1680 
1681 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1682 
1683 fail:
1684 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1685 
1686 	return status;
1687 }
1688 
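/*
 * Record the status code the control path wants reported in the ADDBA
 * response for this peer/TID; dp_addba_requestprocess_wifi3() copies it
 * into rx_tid->statuscode when a non-success value has been set.
 */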
1689 QDF_STATUS
1690 dp_set_addba_response(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1691 		      uint16_t vdev_id, uint8_t tid, uint16_t statuscode)
1692 {
1693 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1694 					(struct dp_soc *)cdp_soc,
1695 					peer_mac, 0, vdev_id,
1696 					DP_MOD_ID_CDP);
1697 	struct dp_rx_tid *rx_tid;
1698 
1699 	if (!peer) {
1700 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1701 		return QDF_STATUS_E_FAILURE;
1702 	}
1703 
1704 	rx_tid = &peer->rx_tid[tid];
1705 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1706 	rx_tid->userstatuscode = statuscode;
1707 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1708 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1709 
1710 	return QDF_STATUS_SUCCESS;
1711 }
1712 
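/*
 * Received-DELBA processing: ignored unless a session is fully active;
 * otherwise the reason code is recorded, the REO queue is shrunk back to a
 * window of 1, the TID is marked DP_RX_BA_INACTIVE and the peer's active
 * session count is decremented.
 */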
1713 int dp_delba_process_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1714 			   uint16_t vdev_id, int tid, uint16_t reasoncode)
1715 {
1716 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1717 	struct dp_rx_tid *rx_tid;
1718 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1719 					(struct dp_soc *)cdp_soc,
1720 					peer_mac, 0, vdev_id,
1721 					DP_MOD_ID_CDP);
1722 
1723 	if (!peer) {
1724 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1725 		return QDF_STATUS_E_FAILURE;
1726 	}
1727 	rx_tid = &peer->rx_tid[tid];
1728 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1729 	if (rx_tid->ba_status == DP_RX_BA_INACTIVE ||
1730 	    rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1731 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1732 		status = QDF_STATUS_E_FAILURE;
1733 		goto fail;
1734 	}
1735 	/* TODO: See if we can delete the existing REO queue descriptor and
1736 	 * replace it with a new one without the queue extension descriptor
1737 	 * to save memory
1738 	 */
1739 	rx_tid->delba_rcode = reasoncode;
1740 	rx_tid->num_of_delba_req++;
1741 	dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1742 
1743 	rx_tid->ba_status = DP_RX_BA_INACTIVE;
1744 	peer->active_ba_session_cnt--;
1745 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1746 fail:
1747 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1748 
1749 	return status;
1750 }
1751 
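/*
 * DELBA tx-completion handling: on tx failure the DELBA is retransmitted
 * via ol_ops->send_delba until DP_MAX_DELBA_RETRY is reached, after which
 * the retry state is simply cleared; on tx success the retry state is
 * cleared and any active or in-progress BA session is torn down by
 * resetting the REO window to 1.
 */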
1752 int dp_delba_tx_completion_wifi3(struct cdp_soc_t *cdp_soc, uint8_t *peer_mac,
1753 				 uint16_t vdev_id,
1754 				 uint8_t tid, int status)
1755 {
1756 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
1757 	struct dp_rx_tid *rx_tid = NULL;
1758 	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(
1759 					(struct dp_soc *)cdp_soc,
1760 					peer_mac, 0, vdev_id,
1761 					DP_MOD_ID_CDP);
1762 
1763 	if (!peer) {
1764 		dp_peer_debug("%pK: Peer is NULL!", cdp_soc);
1765 		return QDF_STATUS_E_FAILURE;
1766 	}
1767 	rx_tid = &peer->rx_tid[tid];
1768 	qdf_spin_lock_bh(&rx_tid->tid_lock);
1769 	if (status) {
1770 		rx_tid->delba_tx_fail_cnt++;
1771 		if (rx_tid->delba_tx_retry >= DP_MAX_DELBA_RETRY) {
1772 			rx_tid->delba_tx_retry = 0;
1773 			rx_tid->delba_tx_status = 0;
1774 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1775 		} else {
1776 			rx_tid->delba_tx_retry++;
1777 			rx_tid->delba_tx_status = 1;
1778 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1779 			if (peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba)
1780 				peer->vdev->pdev->soc->cdp_soc.ol_ops->send_delba(
1781 					peer->vdev->pdev->soc->ctrl_psoc,
1782 					peer->vdev->vdev_id,
1783 					peer->mac_addr.raw, tid,
1784 					rx_tid->delba_rcode,
1785 					CDP_DELBA_REASON_NONE);
1786 		}
1787 		goto end;
1788 	} else {
1789 		rx_tid->delba_tx_success_cnt++;
1790 		rx_tid->delba_tx_retry = 0;
1791 		rx_tid->delba_tx_status = 0;
1792 	}
1793 	if (rx_tid->ba_status == DP_RX_BA_ACTIVE) {
1794 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1795 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1796 		peer->active_ba_session_cnt--;
1797 	}
1798 	if (rx_tid->ba_status == DP_RX_BA_IN_PROGRESS) {
1799 		dp_rx_tid_update_wifi3(peer, tid, 1, IEEE80211_SEQ_MAX, false);
1800 		rx_tid->ba_status = DP_RX_BA_INACTIVE;
1801 	}
1802 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
1803 
1804 end:
1805 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1806 
1807 	return ret;
1808 }
1809 
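/*
 * Program PN (packet number) checking into every TID's REO queue: the PN
 * size is derived from the cipher (48 bits for the TKIP/CCMP/GCMP variants,
 * 128 bits for WAPI, with even/uneven PN selected by AP vs non-AP opmode),
 * and a CMD_UPDATE_RX_REO_QUEUE is issued for each TID that has a hardware
 * queue descriptor.
 */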
1810 QDF_STATUS
1811 dp_set_pn_check_wifi3(struct cdp_soc_t *soc_t, uint8_t vdev_id,
1812 		      uint8_t *peer_mac, enum cdp_sec_type sec_type,
1813 		      uint32_t *rx_pn)
1814 {
1815 	struct dp_pdev *pdev;
1816 	int i;
1817 	uint8_t pn_size;
1818 	struct hal_reo_cmd_params params;
1819 	struct dp_peer *peer = NULL;
1820 	struct dp_vdev *vdev = NULL;
1821 	struct dp_soc *soc = NULL;
1822 
1823 	peer = dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc_t,
1824 					      peer_mac, 0, vdev_id,
1825 					      DP_MOD_ID_CDP);
1826 
1827 	if (!peer) {
1828 		dp_peer_debug("%pK: Peer is NULL!", soc_t);
1829 		return QDF_STATUS_E_FAILURE;
1830 	}
1831 
1832 	vdev = peer->vdev;
1833 
1834 	if (!vdev) {
1835 		dp_peer_debug("%pK: VDEV is NULL!", soc_t);
1836 		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1837 		return QDF_STATUS_E_FAILURE;
1838 	}
1839 
1840 	pdev = vdev->pdev;
1841 	soc = pdev->soc;
1842 	qdf_mem_zero(&params, sizeof(params));
1843 
1844 	params.std.need_status = 1;
1845 	params.u.upd_queue_params.update_pn_valid = 1;
1846 	params.u.upd_queue_params.update_pn_size = 1;
1847 	params.u.upd_queue_params.update_pn = 1;
1848 	params.u.upd_queue_params.update_pn_check_needed = 1;
1849 	params.u.upd_queue_params.update_svld = 1;
1850 	params.u.upd_queue_params.svld = 0;
1851 
1852 	switch (sec_type) {
1853 	case cdp_sec_type_tkip_nomic:
1854 	case cdp_sec_type_aes_ccmp:
1855 	case cdp_sec_type_aes_ccmp_256:
1856 	case cdp_sec_type_aes_gcmp:
1857 	case cdp_sec_type_aes_gcmp_256:
1858 		params.u.upd_queue_params.pn_check_needed = 1;
1859 		params.u.upd_queue_params.pn_size = PN_SIZE_48;
1860 		pn_size = 48;
1861 		break;
1862 	case cdp_sec_type_wapi:
1863 		params.u.upd_queue_params.pn_check_needed = 1;
1864 		params.u.upd_queue_params.pn_size = PN_SIZE_128;
1865 		pn_size = 128;
1866 		if (vdev->opmode == wlan_op_mode_ap) {
1867 			params.u.upd_queue_params.pn_even = 1;
1868 			params.u.upd_queue_params.update_pn_even = 1;
1869 		} else {
1870 			params.u.upd_queue_params.pn_uneven = 1;
1871 			params.u.upd_queue_params.update_pn_uneven = 1;
1872 		}
1873 		break;
1874 	default:
1875 		params.u.upd_queue_params.pn_check_needed = 0;
1876 		pn_size = 0;
1877 		break;
1878 	}
1879 
1880 	for (i = 0; i < DP_MAX_TIDS; i++) {
1881 		struct dp_rx_tid *rx_tid = &peer->rx_tid[i];
1882 
1883 		qdf_spin_lock_bh(&rx_tid->tid_lock);
1884 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
1885 			params.std.addr_lo =
1886 				rx_tid->hw_qdesc_paddr & 0xffffffff;
1887 			params.std.addr_hi =
1888 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
1889 
1890 			if (pn_size) {
1891 				dp_peer_info("%pK: PN set for TID:%d pn:%x:%x:%x:%x",
1892 					     soc, i, rx_pn[3], rx_pn[2],
1893 					     rx_pn[1], rx_pn[0]);
1894 				params.u.upd_queue_params.update_pn_valid = 1;
1895 				params.u.upd_queue_params.pn_31_0 = rx_pn[0];
1896 				params.u.upd_queue_params.pn_63_32 = rx_pn[1];
1897 				params.u.upd_queue_params.pn_95_64 = rx_pn[2];
1898 				params.u.upd_queue_params.pn_127_96 = rx_pn[3];
1899 			}
1900 			rx_tid->pn_size = pn_size;
1901 			if (dp_reo_send_cmd(soc,
1902 					    CMD_UPDATE_RX_REO_QUEUE,
1903 					    &params, dp_rx_tid_update_cb,
1904 					    rx_tid)) {
1905 				dp_err_log("fail to send CMD_UPDATE_RX_REO_QUEUE"
1906 					   "tid %d desc %pK", rx_tid->tid,
1907 					   (void *)(rx_tid->hw_qdesc_paddr));
1908 				DP_STATS_INC(soc,
1909 					     rx.err.reo_cmd_send_fail, 1);
1910 			}
1911 		} else {
1912 			dp_peer_info("%pK: PN Check not setup for TID :%d ", soc, i);
1913 		}
1914 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
1915 	}
1916 
1917 	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
1918 
1919 	return QDF_STATUS_SUCCESS;
1920 }
1921 
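/*
 * DELBA indication from the target for (peer_id, tid): clamp the requested
 * window override to at most 63, record IEEE80211_REASON_QOS_SETUP_REQUIRED
 * and ask the control path to transmit a DELBA so the session is
 * renegotiated with the smaller window.
 */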
1922 QDF_STATUS
1923 dp_rx_delba_ind_handler(void *soc_handle, uint16_t peer_id,
1924 			uint8_t tid, uint16_t win_sz)
1925 {
1926 	struct dp_soc *soc = (struct dp_soc *)soc_handle;
1927 	struct dp_peer *peer;
1928 	struct dp_rx_tid *rx_tid;
1929 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1930 
1931 	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
1932 
1933 	if (!peer) {
1934 		dp_peer_err("%pK: Couldn't find peer from ID %d",
1935 			    soc, peer_id);
1936 		return QDF_STATUS_E_FAILURE;
1937 	}
1938 
1939 	qdf_assert_always(tid < DP_MAX_TIDS);
1940 
1941 	rx_tid = &peer->rx_tid[tid];
1942 
1943 	if (rx_tid->hw_qdesc_vaddr_unaligned) {
1944 		if (!rx_tid->delba_tx_status) {
1945 			dp_peer_info("%pK: PEER_ID: %d TID: %d, BA win: %d ",
1946 				     soc, peer_id, tid, win_sz);
1947 
1948 			qdf_spin_lock_bh(&rx_tid->tid_lock);
1949 
1950 			rx_tid->delba_tx_status = 1;
1951 
1952 			rx_tid->rx_ba_win_size_override =
1953 			    qdf_min((uint16_t)63, win_sz);
1954 
1955 			rx_tid->delba_rcode =
1956 			    IEEE80211_REASON_QOS_SETUP_REQUIRED;
1957 
1958 			qdf_spin_unlock_bh(&rx_tid->tid_lock);
1959 
1960 			if (soc->cdp_soc.ol_ops->send_delba)
1961 				soc->cdp_soc.ol_ops->send_delba(
1962 					peer->vdev->pdev->soc->ctrl_psoc,
1963 					peer->vdev->vdev_id,
1964 					peer->mac_addr.raw,
1965 					tid,
1966 					rx_tid->delba_rcode,
1967 					CDP_DELBA_REASON_NONE);
1968 		}
1969 	} else {
1970 		dp_peer_err("%pK: BA session is not setup for TID:%d ",
1971 			    soc, tid);
1972 		status = QDF_STATUS_E_FAILURE;
1973 	}
1974 
1975 	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
1976 
1977 	return status;
1978 }
1979 
1980 #ifdef IPA_OFFLOAD
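/*
 * IPA variant of the per-TID REO stats query: the callback context packs
 * (tid << DP_PEER_REO_STATS_TID_SHIFT) | peer_id instead of the rx_tid
 * pointer, and every CMD_GET_QUEUE_STATS is followed by a no-invalidate
 * CMD_FLUSH_CACHE so the DDR copy of the queue descriptor is refreshed.
 */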
1981 int dp_peer_get_rxtid_stats_ipa(struct dp_peer *peer,
1982 				dp_rxtid_stats_cmd_cb dp_stats_cmd_cb)
1983 {
1984 	struct dp_soc *soc = peer->vdev->pdev->soc;
1985 	struct hal_reo_cmd_params params;
1986 	int i;
1987 	int stats_cmd_sent_cnt = 0;
1988 	QDF_STATUS status;
1989 	uint16_t peer_id = peer->peer_id;
1990 	unsigned long comb_peer_id_tid;
1991 	struct dp_rx_tid *rx_tid;
1992 
1993 	if (!dp_stats_cmd_cb)
1994 		return stats_cmd_sent_cnt;
1995 
1996 	qdf_mem_zero(&params, sizeof(params));
1997 	for (i = 0; i < DP_MAX_TIDS; i++) {
1998 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
1999 			continue;
2000 
2001 		rx_tid = &peer->rx_tid[i];
2002 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
2003 			params.std.need_status = 1;
2004 			params.std.addr_lo =
2005 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2006 			params.std.addr_hi =
2007 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2008 			params.u.stats_params.clear = 1;
2009 			comb_peer_id_tid = ((i << DP_PEER_REO_STATS_TID_SHIFT)
2010 					    | peer_id);
2011 			status = dp_reo_send_cmd(soc, CMD_GET_QUEUE_STATS,
2012 						 &params, dp_stats_cmd_cb,
2013 						 (void *)comb_peer_id_tid);
2014 			if (QDF_IS_STATUS_SUCCESS(status))
2015 				stats_cmd_sent_cnt++;
2016 
2017 			/* Flush REO descriptor from HW cache to update stats
2018 			 * in descriptor memory. This is to help debugging
2019 			 */
2020 			qdf_mem_zero(&params, sizeof(params));
2021 			params.std.need_status = 0;
2022 			params.std.addr_lo =
2023 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2024 			params.std.addr_hi =
2025 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2026 			params.u.fl_cache_params.flush_no_inval = 1;
2027 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2028 					NULL);
2029 		}
2030 	}
2031 
2032 	return stats_cmd_sent_cnt;
2033 }
2034 
2035 qdf_export_symbol(dp_peer_get_rxtid_stats_ipa);
2036 
2037 #endif
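/*
 * Issue CMD_GET_QUEUE_STATS for every data TID (and the non-QoS TID) that
 * has a hardware queue descriptor, passing cb_ctxt to the callback when the
 * caller supplies one and the rx_tid otherwise, then flush the descriptor
 * (no invalidate) so the stats land in descriptor memory for debugging.
 * Returns the number of stats commands successfully queued.
 */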
2038 int dp_peer_rxtid_stats(struct dp_peer *peer,
2039 			dp_rxtid_stats_cmd_cb dp_stats_cmd_cb,
2040 			void *cb_ctxt)
2041 {
2042 	struct dp_soc *soc = peer->vdev->pdev->soc;
2043 	struct hal_reo_cmd_params params;
2044 	int i;
2045 	int stats_cmd_sent_cnt = 0;
2046 	QDF_STATUS status;
2047 	struct dp_rx_tid *rx_tid;
2048 
2049 	if (!dp_stats_cmd_cb)
2050 		return stats_cmd_sent_cnt;
2051 
2052 	qdf_mem_zero(&params, sizeof(params));
2053 	for (i = 0; i < DP_MAX_TIDS; i++) {
2054 		if ((i >= CDP_DATA_TID_MAX) && (i != CDP_DATA_NON_QOS_TID))
2055 			continue;
2056 
2057 		rx_tid = &peer->rx_tid[i];
2058 		if (rx_tid->hw_qdesc_vaddr_unaligned) {
2059 			params.std.need_status = 1;
2060 			params.std.addr_lo =
2061 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2062 			params.std.addr_hi =
2063 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2064 
2065 			if (cb_ctxt) {
2066 				status = dp_reo_send_cmd(
2067 						soc, CMD_GET_QUEUE_STATS,
2068 						&params, dp_stats_cmd_cb,
2069 						cb_ctxt);
2070 			} else {
2071 				status = dp_reo_send_cmd(
2072 						soc, CMD_GET_QUEUE_STATS,
2073 						&params, dp_stats_cmd_cb,
2074 						rx_tid);
2075 			}
2076 
2077 			if (QDF_IS_STATUS_SUCCESS(status))
2078 				stats_cmd_sent_cnt++;
2079 
2080 			/* Flush REO descriptor from HW cache to update stats
2081 			 * in descriptor memory. This is to help debugging
2082 			 */
2083 			qdf_mem_zero(&params, sizeof(params));
2084 			params.std.need_status = 0;
2085 			params.std.addr_lo =
2086 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2087 			params.std.addr_hi =
2088 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2089 			params.u.fl_cache_params.flush_no_inval = 1;
2090 			dp_reo_send_cmd(soc, CMD_FLUSH_CACHE, &params, NULL,
2091 					NULL);
2092 		}
2093 	}
2094 
2095 	return stats_cmd_sent_cnt;
2096 }
2097 
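/*
 * Allocate and zero the per-peer rx_tid array (DP_MAX_TIDS entries) and
 * create each TID lock.  MLD peers are skipped.
 */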
2098 QDF_STATUS dp_peer_rx_tids_create(struct dp_peer *peer)
2099 {
2100 	uint8_t i;
2101 
2102 	if (IS_MLO_DP_MLD_PEER(peer)) {
2103 		dp_peer_info("skip for mld peer");
2104 		return QDF_STATUS_SUCCESS;
2105 	}
2106 
2107 	if (peer->rx_tid) {
2108 		QDF_BUG(0);
2109 		dp_peer_err("peer rx_tid mem already exist");
2110 		return QDF_STATUS_E_FAILURE;
2111 	}
2112 
2113 	peer->rx_tid = qdf_mem_malloc(DP_MAX_TIDS *
2114 			sizeof(struct dp_rx_tid));
2115 
2116 	if (!peer->rx_tid) {
2117 		dp_err("fail to alloc tid for peer" QDF_MAC_ADDR_FMT,
2118 		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2119 		return QDF_STATUS_E_NOMEM;
2120 	}
2121 
2122 	qdf_mem_zero(peer->rx_tid, DP_MAX_TIDS * sizeof(struct dp_rx_tid));
2123 	for (i = 0; i < DP_MAX_TIDS; i++)
2124 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
2125 
2126 	return QDF_STATUS_SUCCESS;
2127 }
2128 
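/*
 * Destroy the per-TID locks and free the rx_tid array; for MLO link peers
 * only the pointer is cleared, the backing memory is left untouched here.
 */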
2129 void dp_peer_rx_tids_destroy(struct dp_peer *peer)
2130 {
2131 	uint8_t i;
2132 
2133 	if (!IS_MLO_DP_LINK_PEER(peer)) {
2134 		for (i = 0; i < DP_MAX_TIDS; i++)
2135 			qdf_spinlock_destroy(&peer->rx_tid[i].tid_lock);
2136 
2137 		qdf_mem_free(peer->rx_tid);
2138 	}
2139 
2140 	peer->rx_tid = NULL;
2141 }
2142 
2143 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
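/*
 * REO flush-cache completion callback: if the flush succeeded, dump the
 * queue descriptor contents for the TID under the TID lock.
 */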
2144 void dp_dump_rx_reo_queue_info(
2145 	struct dp_soc *soc, void *cb_ctxt, union hal_reo_status *reo_status)
2146 {
2147 	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
2148 
2149 	if (!rx_tid)
2150 		return;
2151 
2152 	if (reo_status->fl_cache_status.header.status !=
2153 		HAL_REO_CMD_SUCCESS) {
2154 		dp_err_rl("Rx tid REO HW desc flush failed(%d)",
2155 			  reo_status->fl_cache_status.header.status);
2156 		return;
2157 	}
2158 	qdf_spin_lock_bh(&rx_tid->tid_lock);
2159 	hal_dump_rx_reo_queue_desc(rx_tid->hw_qdesc_vaddr_aligned);
2160 	qdf_spin_unlock_bh(&rx_tid->tid_lock);
2161 }
2162 
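/*
 * Issue CMD_FLUSH_CACHE for every TID that has a hardware queue descriptor
 * (flushing the 1k descriptor when the BA window exceeds 256 and forwarding
 * any queued MPDUs), with dp_dump_rx_reo_queue_info() as the completion
 * callback; stop on the first send failure.
 */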
2163 void dp_send_cache_flush_for_rx_tid(
2164 	struct dp_soc *soc, struct dp_peer *peer)
2165 {
2166 	int i;
2167 	struct dp_rx_tid *rx_tid;
2168 	struct hal_reo_cmd_params params;
2169 
2170 	if (!peer) {
2171 		dp_err_rl("Peer is NULL");
2172 		return;
2173 	}
2174 
2175 	for (i = 0; i < DP_MAX_TIDS; i++) {
2176 		rx_tid = &peer->rx_tid[i];
2177 		if (!rx_tid)
2178 			continue;
2179 		qdf_spin_lock_bh(&rx_tid->tid_lock);
2180 		if (rx_tid->hw_qdesc_vaddr_aligned) {
2181 			qdf_mem_zero(&params, sizeof(params));
2182 			params.std.need_status = 1;
2183 			params.std.addr_lo =
2184 				rx_tid->hw_qdesc_paddr & 0xffffffff;
2185 			params.std.addr_hi =
2186 				(uint64_t)(rx_tid->hw_qdesc_paddr) >> 32;
2187 			params.u.fl_cache_params.flush_no_inval = 0;
2188 
2189 			if (rx_tid->ba_win_size > 256)
2190 				params.u.fl_cache_params.flush_q_1k_desc = 1;
2191 			params.u.fl_cache_params.fwd_mpdus_in_queue = 1;
2192 
2193 			if (QDF_STATUS_SUCCESS !=
2194 				dp_reo_send_cmd(
2195 					soc, CMD_FLUSH_CACHE,
2196 					&params, dp_dump_rx_reo_queue_info,
2197 					(void *)rx_tid)) {
2198 				dp_err_rl("cache flush send failed tid %d",
2199 					  rx_tid->tid);
2200 				qdf_spin_unlock_bh(&rx_tid->tid_lock);
2201 				break;
2202 			}
2203 		}
2204 		qdf_spin_unlock_bh(&rx_tid->tid_lock);
2205 	}
2206 }
2207 
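/*
 * CDP entry point: resolve the vdev's BSS peer and trigger the REO cache
 * flush / queue-descriptor dump sequence above for all of its TIDs.
 */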
2208 void dp_get_rx_reo_queue_info(
2209 	struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
2210 {
2211 	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
2212 	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
2213 						     DP_MOD_ID_GENERIC_STATS);
2214 	struct dp_peer *peer = NULL;
2215 
2216 	if (!vdev) {
2217 		dp_err_rl("vdev is null for vdev_id: %u", vdev_id);
2218 		goto failed;
2219 	}
2220 
2221 	peer = dp_vdev_bss_peer_ref_n_get(soc, vdev, DP_MOD_ID_GENERIC_STATS);
2222 
2223 	if (!peer) {
2224 		dp_err_rl("Peer is NULL");
2225 		goto failed;
2226 	}
2227 	dp_send_cache_flush_for_rx_tid(soc, peer);
2228 failed:
2229 	if (peer)
2230 		dp_peer_unref_delete(peer, DP_MOD_ID_GENERIC_STATS);
2231 	if (vdev)
2232 		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
2233 }
2234 #endif /* DUMP_REO_QUEUE_INFO_IN_DDR */
2235 
2236