/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <wlan_utility.h>
#include <dp_internal.h>
#include <dp_htt.h>
#include <hal_be_api.h>
#include "dp_mlo.h"
#include <dp_be.h>
#include <dp_be_rx.h>
#include <wlan_cfg.h>
#include <wlan_mlo_mgr_cmn.h>
#include "dp_umac_reset.h"

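/*
 * Aggregate the stats that were preserved for peers which have already
 * been unmapped (deleted); these are retained in the vdev/MLO dev
 * context so they are not lost when the peer object goes away.
 */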
#define dp_aggregate_vdev_stats_for_unmapped_peers(_tgtobj, _srcobj) \
	DP_UPDATE_VDEV_STATS_FOR_UNMAPPED_PEERS(_tgtobj, _srcobj)

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update Umac reset partner map
 * @mlo_ctx: mlo soc context
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set);
#endif
/**
 * dp_mlo_ctxt_attach_wifi3() - Attach DP MLO context
 * @ctrl_ctxt: CDP control context
 *
 * Return: DP MLO context handle on success, NULL on failure
 */
static struct cdp_mlo_ctxt *
dp_mlo_ctxt_attach_wifi3(struct cdp_ctrl_mlo_mgr *ctrl_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt =
		qdf_mem_malloc(sizeof(struct dp_mlo_ctxt));

	if (!mlo_ctxt) {
		dp_err("Failed to allocate DP MLO Context");
		return NULL;
	}

	mlo_ctxt->ctrl_ctxt = ctrl_ctxt;

	if (dp_mlo_peer_find_hash_attach_be
			(mlo_ctxt, DP_MAX_MLO_PEER) != QDF_STATUS_SUCCESS) {
		dp_err("Failed to allocate peer hash");
		qdf_mem_free(mlo_ctxt);
		return NULL;
	}

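	/*
	 * Generate the Toeplitz hash keys once at the MLO group level so
	 * that every partner SOC programs the same RX hash key (fetched
	 * via dp_mlo_get_rx_hash_key()) and a given flow hashes
	 * identically on all chips.
	 */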
	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv4,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv4[0]) *
			      LRO_IPV4_SEED_ARR_SZ));
	qdf_get_random_bytes(mlo_ctxt->toeplitz_hash_ipv6,
			     (sizeof(mlo_ctxt->toeplitz_hash_ipv6[0]) *
			      LRO_IPV6_SEED_ARR_SZ));

	qdf_spinlock_create(&mlo_ctxt->ml_soc_list_lock);
	qdf_spinlock_create(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	dp_mlo_dev_ctxt_list_attach(mlo_ctxt);
	return dp_mlo_ctx_to_cdp(mlo_ctxt);
}

/**
 * dp_mlo_ctxt_detach_wifi3() - Detach DP MLO context
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 *
 * Return: void
 */
static void dp_mlo_ctxt_detach_wifi3(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);

	if (!cdp_ml_ctxt)
		return;

	qdf_spinlock_destroy(&mlo_ctxt->grp_umac_reset_ctx.grp_ctx_lock);
	qdf_spinlock_destroy(&mlo_ctxt->ml_soc_list_lock);
	dp_mlo_dev_ctxt_list_detach(mlo_ctxt);
	dp_mlo_peer_find_hash_detach_be(mlo_ctxt);
	qdf_mem_free(mlo_ctxt);
}

/**
 * dp_mlo_set_soc_by_chip_id() - Add DP soc to ML context soc list
 * @ml_ctxt: DP ML context handle
 * @soc: DP soc handle
 * @chip_id: MLO chip id
 *
 * Return: void
 */
static void dp_mlo_set_soc_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
				      struct dp_soc *soc,
				      uint8_t chip_id)
{
	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	ml_ctxt->ml_soc_list[chip_id] = soc;

	/* This API is called during both soc_attach and soc_detach;
	 * the soc parameter is non-NULL or NULL accordingly.
	 */
	if (soc)
		ml_ctxt->ml_soc_cnt++;
	else
		ml_ctxt->ml_soc_cnt--;

	dp_umac_reset_update_partner_map(ml_ctxt, chip_id, !!soc);

	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
}

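/**
 * dp_mlo_get_soc_ref_by_chip_id() - Get a referenced DP soc by MLO chip id
 * @ml_ctxt: DP ML context handle
 * @chip_id: MLO chip id
 *
 * The soc reference count is incremented under the soc list lock; the
 * caller must release it with qdf_atomic_dec(&soc->ref_count) once done.
 *
 * Return: DP soc handle on success, NULL if the context or soc is absent
 */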
struct dp_soc *
dp_mlo_get_soc_ref_by_chip_id(struct dp_mlo_ctxt *ml_ctxt,
			      uint8_t chip_id)
{
	struct dp_soc *soc = NULL;

	if (!ml_ctxt) {
		dp_warn("MLO context not created, MLO not enabled");
		return NULL;
	}

	qdf_spin_lock_bh(&ml_ctxt->ml_soc_list_lock);
	soc = ml_ctxt->ml_soc_list[chip_id];

	if (!soc) {
		qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);
		return NULL;
	}

	qdf_atomic_inc(&soc->ref_count);
	qdf_spin_unlock_bh(&ml_ctxt->ml_soc_list_lock);

	return soc;
}

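/**
 * dp_partner_soc_rx_hw_cc_init() - Init RX HW cookie conversion for partners
 * @mlo_ctxt: DP ML context handle
 * @be_soc: BE soc on which the partner RX descriptor pools are mapped
 *
 * Walk every partner soc in the MLO group and initialize HW cookie
 * conversion for each of its RX descriptor pools on @be_soc.
 *
 * Return: QDF_STATUS
 */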
static QDF_STATUS dp_partner_soc_rx_hw_cc_init(struct dp_mlo_ctxt *mlo_ctxt,
					       struct dp_soc_be *be_soc)
{
	uint8_t i;
	struct dp_soc *partner_soc;
	struct dp_soc_be *be_partner_soc;
	uint8_t pool_id;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);
		if (!partner_soc) {
			dp_err("partner_soc is NULL");
			continue;
		}

		be_partner_soc = dp_get_be_soc_from_dp_soc(partner_soc);

		for (pool_id = 0; pool_id < MAX_RXDESC_POOLS; pool_id++) {
			qdf_status =
				dp_hw_cookie_conversion_init
					(be_soc,
					 &be_partner_soc->rx_cc_ctx[pool_id]);
			if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
				dp_alert("MLO partner soc RX CC init failed");
				return qdf_status;
			}
		}
	}

	return qdf_status;
}

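/**
 * dp_mlo_soc_drain_rx_buf() - Drain pending RX buffers on a partner soc
 * @soc: DP soc handle
 * @arg: unused iterator argument
 * @chip_id: MLO chip id of @soc
 *
 * Temporarily clears the RX-related interrupt masks, waits for any
 * in-flight dp_service_srngs() pass to finish, then polls the REO
 * destination, REO exception and RX WBM release rings to flush out
 * whatever buffers are still queued, and finally restores the masks.
 *
 * Return: void
 */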
static void dp_mlo_soc_drain_rx_buf(struct dp_soc *soc, void *arg, int chip_id)
{
	uint8_t i = 0;
	uint8_t cpu = 0;
	uint8_t rx_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_err_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t rx_wbm_rel_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};
	uint8_t reo_status_ring_mask[WLAN_CFG_INT_NUM_CONTEXTS] = {0};

	/* Save the current interrupt masks and disable the interrupts */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		rx_ring_mask[i] = soc->intr_ctx[i].rx_ring_mask;
		rx_err_ring_mask[i] = soc->intr_ctx[i].rx_err_ring_mask;
		rx_wbm_rel_ring_mask[i] = soc->intr_ctx[i].rx_wbm_rel_ring_mask;
		reo_status_ring_mask[i] = soc->intr_ctx[i].reo_status_ring_mask;

		soc->intr_ctx[i].rx_ring_mask = 0;
		soc->intr_ctx[i].rx_err_ring_mask = 0;
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0;
		soc->intr_ctx[i].reo_status_ring_mask = 0;
	}

	/* Busy-wait until dp_service_srngs() is not running on any CPU;
	 * with the masks cleared above it cannot pick up these rings again.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		while (qdf_atomic_test_bit(cpu,
					   &soc->service_rings_running))
			;
	}

	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		uint8_t ring = 0;
		uint32_t num_entries = 0;
		hal_ring_handle_t hal_ring_hdl = NULL;
		uint8_t rx_mask = wlan_cfg_get_rx_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_err_mask = wlan_cfg_get_rx_err_ring_mask(
						soc->wlan_cfg_ctx, i);
		uint8_t rx_wbm_rel_mask = wlan_cfg_get_rx_wbm_rel_ring_mask(
						soc->wlan_cfg_ctx, i);

		if (rx_mask) {
			/* Iterate through each REO dest ring and process
			 * the buffers queued on it.
			 */
			for (ring = 0; ring < soc->num_reo_dest_rings; ring++) {
				if (!(rx_mask & (1 << ring)))
					continue;

				hal_ring_hdl =
					soc->reo_dest_ring[ring].hal_srng;
				num_entries = hal_srng_get_num_entries(
								soc->hal_soc,
								hal_ring_hdl);
				dp_rx_process_be(&soc->intr_ctx[i],
						 hal_ring_hdl,
						 ring,
						 num_entries);
			}
		}

		/* Process REO exception ring */
		if (rx_err_mask) {
			hal_ring_hdl = soc->reo_exception_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_err_process(&soc->intr_ctx[i], soc,
					  hal_ring_hdl, num_entries);
		}

		/* Process RX WBM release ring */
		if (rx_wbm_rel_mask) {
			hal_ring_hdl = soc->rx_rel_ring.hal_srng;
			num_entries = hal_srng_get_num_entries(
						soc->hal_soc,
						hal_ring_hdl);

			dp_rx_wbm_err_process(&soc->intr_ctx[i], soc,
					      hal_ring_hdl, num_entries);
		}
	}

	/* Restore the interrupt masks */
	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
		soc->intr_ctx[i].rx_ring_mask = rx_ring_mask[i];
		soc->intr_ctx[i].rx_err_ring_mask = rx_err_ring_mask[i];
		soc->intr_ctx[i].rx_wbm_rel_ring_mask = rx_wbm_rel_ring_mask[i];
		soc->intr_ctx[i].reo_status_ring_mask = reo_status_ring_mask[i];
	}
}

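/**
 * dp_mlo_soc_setup() - Register a DP soc with its MLO group
 * @soc_hdl: CDP soc handle
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 *
 * Attaches the ML context to the BE soc, records the link-to-pdev
 * mapping for every attached pdev and adds the soc to the group's
 * soc list indexed by MLO chip id.
 *
 * Return: void
 */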
static void dp_mlo_soc_setup(struct cdp_soc_t *soc_hdl,
			     struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	uint8_t pdev_id;

	if (!cdp_ml_ctxt)
		return;

	be_soc->ml_ctxt = mlo_ctxt;

	for (pdev_id = 0; pdev_id < MAX_PDEV_CNT; pdev_id++) {
		if (soc->pdev_list[pdev_id])
			dp_mlo_update_link_to_pdev_map(soc,
						       soc->pdev_list[pdev_id]);
	}

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, soc, be_soc->mlo_chip_id);
}

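/**
 * dp_mlo_soc_teardown() - Remove a DP soc from its MLO group
 * @soc_hdl: CDP soc handle
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 * @is_force_down: flag indicating a forced teardown
 *
 * Return: void
 */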
static void dp_mlo_soc_teardown(struct cdp_soc_t *soc_hdl,
				struct cdp_mlo_ctxt *cdp_ml_ctxt,
				bool is_force_down)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!cdp_ml_ctxt)
		return;

	/* During teardown, drain any Rx buffers still queued in the rings */
	dp_mlo_iter_ptnr_soc(be_soc,
			     dp_mlo_soc_drain_rx_buf,
			     NULL);

	dp_mlo_set_soc_by_chip_id(mlo_ctxt, NULL, be_soc->mlo_chip_id);
	be_soc->ml_ctxt = NULL;
}

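/**
 * dp_mlo_setup_complete() - Complete MLO setup for all partner socs
 * @cdp_ml_ctxt: pointer to CDP DP MLO context
 *
 * For each soc in the group, initialize RX HW cookie conversion against
 * every partner soc's RX descriptor pools.
 *
 * Return: void
 */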
static void dp_mlo_setup_complete(struct cdp_mlo_ctxt *cdp_ml_ctxt)
{
	struct dp_mlo_ctxt *mlo_ctxt = cdp_mlo_ctx_to_dp(cdp_ml_ctxt);
	int i;
	struct dp_soc *soc;
	struct dp_soc_be *be_soc;
	QDF_STATUS qdf_status;

	if (!cdp_ml_ctxt)
		return;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS; i++) {
		soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, i);

		if (!soc)
			continue;
		be_soc = dp_get_be_soc_from_dp_soc(soc);

		qdf_status = dp_partner_soc_rx_hw_cc_init(mlo_ctxt, be_soc);

		if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
			dp_alert("MLO partner SOC Rx desc CC init failed");
			qdf_assert_always(0);
		}
	}
}

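/**
 * dp_mlo_update_delta_tsf2() - Record the per-pdev TSF2 delta
 * @soc_hdl: CDP soc handle
 * @pdev_id: pdev id
 * @delta_tsf2: TSF2 delta reported for this pdev
 *
 * Return: void
 */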
static void dp_mlo_update_delta_tsf2(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id, uint64_t delta_tsf2)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev;
	struct dp_pdev_be *be_pdev;

	pdev = dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
						  pdev_id);
	if (!pdev) {
		dp_err("pdev is NULL for pdev_id %u", pdev_id);
		return;
	}

	be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	be_pdev->delta_tsf2 = delta_tsf2;
}

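/**
 * dp_mlo_update_delta_tqm() - Record the per-soc TQM clock delta
 * @soc_hdl: CDP soc handle
 * @delta_tqm: TQM delta reported for this soc
 *
 * Return: void
 */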
static void dp_mlo_update_delta_tqm(struct cdp_soc_t *soc_hdl,
				    uint64_t delta_tqm)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->delta_tqm = delta_tqm;
}

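/**
 * dp_mlo_update_mlo_ts_offset() - Record the MLO timestamp offset
 * @soc_hdl: CDP soc handle
 * @offset: MLO timestamp offset for this soc
 *
 * Return: void
 */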
static void dp_mlo_update_mlo_ts_offset(struct cdp_soc_t *soc_hdl,
					uint64_t offset)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	be_soc->mlo_tstamp_offset = offset;
}

#ifdef CONFIG_MLO_SINGLE_DEV
/**
 * dp_aggregate_vdev_basic_stats() - aggregate vdev basic stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_basic_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct dp_vdev_stats *src_vdev_stats)
{
	DP_UPDATE_BASIC_STATS(tgt_vdev_stats, src_vdev_stats);
}

/**
 * dp_aggregate_vdev_ingress_stats() - aggregate vdev ingress stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static inline
void dp_aggregate_vdev_ingress_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct dp_vdev_stats *src_vdev_stats,
			enum dp_pkt_xmit_type xmit_type)
{
	/* Aggregate vdev ingress stats */
	DP_UPDATE_LINK_VDEV_INGRESS_STATS(tgt_vdev_stats, src_vdev_stats,
					  xmit_type);
}

/**
 * dp_aggregate_all_vdev_stats() - aggregate vdev ingress and unmapped
 * peer stats
 * @tgt_vdev_stats: target vdev buffer
 * @src_vdev_stats: source vdev buffer
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static inline
void dp_aggregate_all_vdev_stats(
			struct cdp_vdev_stats *tgt_vdev_stats,
			struct dp_vdev_stats *src_vdev_stats,
			enum dp_pkt_xmit_type xmit_type)
{
	dp_aggregate_vdev_ingress_stats(tgt_vdev_stats, src_vdev_stats,
					xmit_type);
	dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
						   src_vdev_stats);
}

/**
 * dp_mlo_vdev_stats_aggr_bridge_vap() - aggregate bridge vdev stats
 * @be_vdev: DP vdev handle
 * @bridge_vdev: DP vdev handle for bridge vdev
 * @arg: buffer for target vdev stats
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: void
 */
static
void dp_mlo_vdev_stats_aggr_bridge_vap(struct dp_vdev_be *be_vdev,
				       struct dp_vdev *bridge_vdev,
				       void *arg,
				       enum dp_pkt_xmit_type xmit_type)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;
	struct dp_vdev_be *bridge_be_vdev = NULL;

	bridge_be_vdev = dp_get_be_vdev_from_dp_vdev(bridge_vdev);
	if (!bridge_be_vdev)
		return;

	dp_aggregate_all_vdev_stats(tgt_vdev_stats, &bridge_vdev->stats,
				    xmit_type);
	dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
						   &bridge_be_vdev->mlo_stats);
	dp_vdev_iterate_peer(bridge_vdev, dp_update_vdev_stats, tgt_vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
}

/**
 * dp_mlo_vdev_stats_aggr_bridge_vap_unified() - aggregate bridge vdev stats
 * for unified mode, where all MLO and legacy packets are submitted to the vdev
 * @be_vdev: DP vdev handle
 * @bridge_vdev: DP vdev handle for bridge vdev
 * @arg: buffer for target vdev stats
 *
 * Return: void
 */
static
void dp_mlo_vdev_stats_aggr_bridge_vap_unified(struct dp_vdev_be *be_vdev,
					       struct dp_vdev *bridge_vdev,
					       void *arg)
{
	dp_mlo_vdev_stats_aggr_bridge_vap(be_vdev, bridge_vdev, arg,
					  DP_XMIT_TOTAL);
}

/**
 * dp_mlo_vdev_stats_aggr_bridge_vap_mld() - aggregate bridge vdev stats for
 * MLD mode, where all MLO packets are submitted to the MLD
 * @be_vdev: DP vdev handle
 * @bridge_vdev: DP vdev handle for bridge vdev
 * @arg: buffer for target vdev stats
 *
 * Return: void
 */
static
void dp_mlo_vdev_stats_aggr_bridge_vap_mld(struct dp_vdev_be *be_vdev,
					   struct dp_vdev *bridge_vdev,
					   void *arg)
{
	dp_mlo_vdev_stats_aggr_bridge_vap(be_vdev, bridge_vdev, arg,
					  DP_XMIT_MLD);
}

/**
 * dp_aggregate_interface_stats_based_on_peer_type() - aggregate stats at
 * VDEV level based on the type of peers connected to the vdev
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 * @peer_type: type of peer - MLO Link or Legacy peer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats_based_on_peer_type(
					struct dp_vdev *vdev,
					struct cdp_vdev_stats *vdev_stats,
					enum dp_peer_type peer_type)
{
	struct cdp_vdev_stats *tgt_vdev_stats = NULL;
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	tgt_vdev_stats = vdev_stats;
	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (peer_type == DP_PEER_TYPE_LEGACY) {
		dp_aggregate_all_vdev_stats(tgt_vdev_stats,
					    &vdev->stats, DP_XMIT_LINK);
	} else {
		if (be_vdev->mcast_primary) {
			dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
					      dp_mlo_vdev_stats_aggr_bridge_vap_mld,
					      (void *)vdev_stats,
					      DP_MOD_ID_GENERIC_STATS,
					      DP_BRIDGE_VDEV_ITER,
					      DP_VDEV_ITERATE_SKIP_SELF);
		}
		dp_aggregate_vdev_ingress_stats(tgt_vdev_stats,
						&vdev->stats, DP_XMIT_MLD);
		dp_aggregate_vdev_stats_for_unmapped_peers(tgt_vdev_stats,
							   &be_vdev->mlo_stats);
	}

	/* Aggregate associated peer stats */
	dp_vdev_iterate_specific_peer_type(vdev,
					   dp_update_vdev_stats,
					   vdev_stats,
					   DP_MOD_ID_GENERIC_STATS,
					   peer_type);
}

/**
 * dp_aggregate_interface_stats() - aggregate stats at VDEV level
 * @vdev: DP VDEV handle
 * @vdev_stats: target vdev stats pointer
 *
 * Return: void
 */
static
void dp_aggregate_interface_stats(struct dp_vdev *vdev,
				  struct cdp_vdev_stats *vdev_stats)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!be_vdev)
		return;

	if (be_vdev->mcast_primary) {
		dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
				      dp_mlo_vdev_stats_aggr_bridge_vap_unified,
				      (void *)vdev_stats, DP_MOD_ID_GENERIC_STATS,
				      DP_BRIDGE_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);
	}

	dp_aggregate_vdev_stats_for_unmapped_peers(vdev_stats,
						   &be_vdev->mlo_stats);
	dp_aggregate_all_vdev_stats(vdev_stats, &vdev->stats,
				    DP_XMIT_TOTAL);

	dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);

	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats() - aggregate MLO partner vdev stats
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats(struct dp_vdev_be *be_vdev,
				  struct dp_vdev *ptnr_vdev,
				  void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats(ptnr_vdev, tgt_vdev_stats);
}

/**
 * dp_mlo_aggr_ptnr_iface_stats_mlo_links() - aggregate MLO partner vdev
 * stats based on peer type
 * @be_vdev: vdev handle
 * @ptnr_vdev: partner vdev handle
 * @arg: target buffer for aggregation
 *
 * Return: void
 */
static
void dp_mlo_aggr_ptnr_iface_stats_mlo_links(
					struct dp_vdev_be *be_vdev,
					struct dp_vdev *ptnr_vdev,
					void *arg)
{
	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)arg;

	dp_aggregate_interface_stats_based_on_peer_type(ptnr_vdev,
							tgt_vdev_stats,
							DP_PEER_TYPE_MLO_LINK);
}

/**
 * dp_aggregate_sta_interface_stats() - for STA mode, aggregate vdev stats
 * from all link peers
 * @soc: soc handle
 * @vdev: vdev handle
 * @buf: target buffer for aggregation
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS
dp_aggregate_sta_interface_stats(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 void *buf)
{
	struct dp_peer *vap_bss_peer = NULL;
	struct dp_peer *mld_peer = NULL;
	struct dp_peer *link_peer = NULL;
	struct dp_mld_link_peers link_peers_info;
	uint8_t i = 0;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
						  DP_MOD_ID_GENERIC_STATS);
	if (!vap_bss_peer)
		return QDF_STATUS_E_FAILURE;

	mld_peer = DP_GET_MLD_PEER_FROM_PEER(vap_bss_peer);

	if (!mld_peer) {
		dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
					    DP_MOD_ID_GENERIC_STATS);

	for (i = 0; i < link_peers_info.num_links; i++) {
		link_peer = link_peers_info.link_peers[i];
		dp_update_vdev_stats(soc, link_peer, buf);
		dp_aggregate_vdev_ingress_stats((struct cdp_vdev_stats *)buf,
						&link_peer->vdev->stats,
						DP_XMIT_TOTAL);
		dp_aggregate_vdev_basic_stats(
					(struct cdp_vdev_stats *)buf,
					&link_peer->vdev->stats);
	}

	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_GENERIC_STATS);
	dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_GENERIC_STATS);
	return ret;
}

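/**
 * dp_mlo_get_mld_vdev_stats() - aggregate MLD-level vdev stats
 * @soc_hdl: CDP soc handle
 * @vdev_id: vdev id of the link vdev to start from
 * @buf: target buffer (struct cdp_vdev_stats) for aggregation
 * @link_vdev_only: if true, aggregate only the given link vdev's stats
 *
 * Return: QDF_STATUS
 */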
static QDF_STATUS dp_mlo_get_mld_vdev_stats(struct cdp_soc_t *soc_hdl,
					    uint8_t vdev_id, void *buf,
					    bool link_vdev_only)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	struct dp_vdev_be *vdev_be = NULL;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	vdev_be = dp_get_be_vdev_from_dp_vdev(vdev);
	if (!vdev_be || !vdev_be->mlo_dev_ctxt) {
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
		return QDF_STATUS_E_FAILURE;
	}

	if (vdev->opmode == wlan_op_mode_sta) {
		ret = dp_aggregate_sta_interface_stats(soc, vdev, buf);
		goto complete;
	}

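	/*
	 * In hybrid non-bonded MLD mode, AP stats are split by peer type:
	 * MLO link peer stats are rolled up here at the MLD level, while
	 * legacy peer stats are reported against the link vdev (see
	 * dp_get_interface_stats_be()).
	 */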
	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_MLO_LINK);
		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats_mlo_links,
				      buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);

		/* Aggregate vdev stats from MLO ctx for detached MLO Links */
		dp_update_mlo_link_vdev_ctxt_stats(buf,
						   &vdev_be->mlo_dev_ctxt->stats,
						   DP_XMIT_MLD);
	} else {
		dp_aggregate_interface_stats(vdev, buf);

		if (link_vdev_only)
			goto complete;

		/* Aggregate stats from partner vdevs */
		dp_mlo_iter_ptnr_vdev(be_soc, vdev_be,
				      dp_mlo_aggr_ptnr_iface_stats, buf,
				      DP_MOD_ID_GENERIC_STATS,
				      DP_LINK_VDEV_ITER,
				      DP_VDEV_ITERATE_SKIP_SELF);

		/* Aggregate vdev stats from MLO ctx for detached MLO Links */
		dp_update_mlo_link_vdev_ctxt_stats(buf,
						   &vdev_be->mlo_dev_ctxt->stats,
						   DP_XMIT_TOTAL);
	}

complete:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);
	return ret;
}

QDF_STATUS
dp_get_interface_stats_be(struct cdp_soc_t *soc_hdl,
			  uint8_t vdev_id,
			  void *buf,
			  bool is_aggregate)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_GENERIC_STATS);
	if (!vdev)
		return QDF_STATUS_E_FAILURE;

	if (DP_MLD_MODE_HYBRID_NONBOND == soc->mld_mode_ap &&
	    vdev->opmode == wlan_op_mode_ap) {
		dp_aggregate_interface_stats_based_on_peer_type(
						vdev, buf,
						DP_PEER_TYPE_LEGACY);
	} else {
		dp_aggregate_interface_stats(vdev, buf);
	}

	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_GENERIC_STATS);

	return QDF_STATUS_SUCCESS;
}
#endif

static struct cdp_mlo_ops dp_mlo_ops = {
	.mlo_soc_setup = dp_mlo_soc_setup,
	.mlo_soc_teardown = dp_mlo_soc_teardown,
	.mlo_setup_complete = dp_mlo_setup_complete,
	.mlo_update_delta_tsf2 = dp_mlo_update_delta_tsf2,
	.mlo_update_delta_tqm = dp_mlo_update_delta_tqm,
	.mlo_update_mlo_ts_offset = dp_mlo_update_mlo_ts_offset,
	.mlo_ctxt_attach = dp_mlo_ctxt_attach_wifi3,
	.mlo_ctxt_detach = dp_mlo_ctxt_detach_wifi3,
#ifdef CONFIG_MLO_SINGLE_DEV
	.mlo_get_mld_vdev_stats = dp_mlo_get_mld_vdev_stats,
#endif
};

void dp_soc_mlo_fill_params(struct dp_soc *soc,
			    struct cdp_soc_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	if (!params->mlo_enabled) {
		dp_warn("MLO not enabled on SOC");
		return;
	}

	be_soc->mlo_chip_id = params->mlo_chip_id;
	be_soc->ml_ctxt = cdp_mlo_ctx_to_dp(params->ml_context);
	be_soc->mlo_enabled = 1;
	soc->cdp_soc.ops->mlo_ops = &dp_mlo_ops;
}

void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC) {
		if (!ml_ctxt->link_to_pdev_map[link_id])
			ml_ctxt->link_to_pdev_map[link_id] = be_pdev;
		else
			dp_alert("Attempt to update existing map for link %u",
				 link_id);
	}
}

void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	uint8_t link_id;

	if (!be_soc->mlo_enabled)
		return;

	if (!ml_ctxt)
		return;

	link_id = be_pdev->mlo_link_id;

	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		ml_ctxt->link_to_pdev_map[link_id] = NULL;
}

static struct dp_pdev_be *
dp_mlo_get_be_pdev_from_link_id(struct dp_mlo_ctxt *ml_ctxt, uint8_t link_id)
{
	if (link_id < WLAN_MAX_MLO_CHIPS * WLAN_MAX_MLO_LINKS_PER_SOC)
		return ml_ctxt->link_to_pdev_map[link_id];
	return NULL;
}

void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
			     struct cdp_pdev_attach_params *params)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(pdev->soc);
	struct dp_pdev_be *be_pdev = dp_get_be_pdev_from_dp_pdev(pdev);

	if (!be_soc->mlo_enabled) {
		dp_info("MLO not enabled on SOC");
		return;
	}

	be_pdev->mlo_link_id = params->mlo_link_id;
}

void dp_mlo_partner_chips_map(struct dp_soc *soc,
			      struct dp_peer *peer,
			      uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = NULL;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	/* For a non-ML peer, don't map on the partner chips */
	if (!is_ml_peer_id)
		return;

	mlo_ctxt = be_soc->ml_ctxt;
	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* Skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_add(temp_soc, peer, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_map);

void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
				uint16_t peer_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	bool is_ml_peer_id =
		HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_GET(peer_id);
	uint8_t chip_id;
	struct dp_soc *temp_soc;

	if (!is_ml_peer_id)
		return;

	if (!mlo_ctxt)
		return;

	qdf_spin_lock_bh(&mlo_ctxt->ml_soc_list_lock);
	for (chip_id = 0; chip_id < DP_MAX_MLO_CHIPS; chip_id++) {
		temp_soc = mlo_ctxt->ml_soc_list[chip_id];

		if (!temp_soc)
			continue;

		/* Skip if this is the current soc */
		if (temp_soc == soc)
			continue;

		dp_peer_find_id_to_obj_remove(temp_soc, peer_id);
	}
	qdf_spin_unlock_bh(&mlo_ctxt->ml_soc_list_lock);
}

qdf_export_symbol(dp_mlo_partner_chips_unmap);

uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);

	return be_soc->mlo_chip_id;
}

qdf_export_symbol(dp_mlo_get_chip_id);

struct dp_peer *
dp_mlo_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				      uint8_t *peer_mac_addr,
				      int mac_addr_is_aligned,
				      uint8_t vdev_id,
				      uint8_t chip_id,
				      enum dp_mod_id mod_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *link_peer_soc = NULL;
	struct dp_peer *peer = NULL;

	if (!mlo_ctxt)
		return NULL;

	link_peer_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

	if (!link_peer_soc)
		return NULL;

	peer = dp_peer_find_hash_find(link_peer_soc, peer_mac_addr,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
	qdf_atomic_dec(&link_peer_soc->ref_count);
	return peer;
}

qdf_export_symbol(dp_mlo_link_peer_hash_find_by_chip_id);

void dp_mlo_get_rx_hash_key(struct dp_soc *soc,
			    struct cdp_lro_hash_config *lro_hash)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !ml_ctxt)
		return dp_get_rx_hash_key_bytes(lro_hash);

	qdf_mem_copy(lro_hash->toeplitz_hash_ipv4, ml_ctxt->toeplitz_hash_ipv4,
		     (sizeof(lro_hash->toeplitz_hash_ipv4[0]) *
		      LRO_IPV4_SEED_ARR_SZ));
	qdf_mem_copy(lro_hash->toeplitz_hash_ipv6, ml_ctxt->toeplitz_hash_ipv6,
		     (sizeof(lro_hash->toeplitz_hash_ipv6[0]) *
		      LRO_IPV6_SEED_ARR_SZ));
}

struct dp_soc *
dp_rx_replenish_soc_get(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *replenish_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	replenish_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	if (qdf_unlikely(!replenish_soc)) {
		dp_alert("replenish SOC is NULL");
		qdf_assert_always(0);
	}

	return replenish_soc;
}

uint8_t dp_soc_get_num_soc_be(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return 1;

	return mlo_ctxt->ml_soc_cnt;
}

struct dp_soc *
dp_soc_get_by_idle_bm_id(struct dp_soc *soc, uint8_t idle_bm_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc = NULL;
	uint8_t chip_id;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	for (chip_id = 0; chip_id < WLAN_MAX_MLO_CHIPS; chip_id++) {
		partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);

		if (!partner_soc)
			continue;

		if (partner_soc->idle_link_bm_id == idle_bm_id)
			return partner_soc;
	}

	return NULL;
}

#ifdef WLAN_MLO_MULTI_CHIP
static void dp_print_mlo_partner_list(struct dp_vdev_be *be_vdev,
				      struct dp_vdev *partner_vdev,
				      void *arg)
{
	struct dp_vdev_be *partner_vdev_be = NULL;
	struct dp_soc_be *partner_soc_be = NULL;

	partner_vdev_be = dp_get_be_vdev_from_dp_vdev(partner_vdev);
	partner_soc_be = dp_get_be_soc_from_dp_soc(partner_vdev->pdev->soc);

	DP_PRINT_STATS("is_bridge_vap = %s, mcast_primary = %s, vdev_id = %d, pdev_id = %d, chip_id = %d",
		       partner_vdev->is_bridge_vdev ? "true" : "false",
		       partner_vdev_be->mcast_primary ? "true" : "false",
		       partner_vdev->vdev_id,
		       partner_vdev->pdev->pdev_id,
		       partner_soc_be->mlo_chip_id);
}

void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
			   struct dp_vdev_be *be_vdev,
			   dp_ptnr_vdev_iter_func func,
			   void *arg,
			   enum dp_mod_id mod_id,
			   uint8_t type,
			   bool include_self_vdev)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	struct dp_vdev *self_vdev = &be_vdev->vdev;

	if (type < DP_LINK_VDEV_ITER || type > DP_ALL_VDEV_ITER) {
		dp_err("invalid iterate type");
		return;
	}

	/* Without an MLO dev context there are no partner vdevs to walk;
	 * visit only the self vdev, if requested, and bail out.
	 */
	if (!be_vdev->mlo_dev_ctxt) {
		if (!include_self_vdev)
			return;
		(*func)(be_vdev, self_vdev, arg);
		return;
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_LINK_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0 ; j < WLAN_MAX_MLO_LINKS_PER_SOC ; j++) {
			struct dp_vdev *ptnr_vdev;

			ptnr_vdev = dp_vdev_get_ref_by_id(
				ptnr_soc,
				be_vdev->mlo_dev_ctxt->vdev_list[i][j],
				mod_id);
			if (!ptnr_vdev)
				continue;

			if ((ptnr_vdev == self_vdev) && (!include_self_vdev)) {
				dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
						     ptnr_vdev,
						     mod_id);
				continue;
			}

			(*func)(be_vdev, ptnr_vdev, arg);
			dp_vdev_unref_delete(ptnr_vdev->pdev->soc,
					     ptnr_vdev,
					     mod_id);
		}
	}

	for (i = 0; (i < WLAN_MAX_MLO_CHIPS) &&
	     IS_BRIDGE_VDEV_ITER_REQUIRED(type); i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0 ; j < WLAN_MAX_MLO_LINKS_PER_SOC ; j++) {
			struct dp_vdev *bridge_vdev;

			bridge_vdev = dp_vdev_get_ref_by_id(
				ptnr_soc,
				be_vdev->mlo_dev_ctxt->bridge_vdev[i][j],
				mod_id);

			if (!bridge_vdev)
				continue;

			if ((bridge_vdev == self_vdev) &&
			    (!include_self_vdev)) {
				dp_vdev_unref_delete(
						bridge_vdev->pdev->soc,
						bridge_vdev,
						mod_id);
				continue;
			}

			(*func)(be_vdev, bridge_vdev, arg);
			dp_vdev_unref_delete(bridge_vdev->pdev->soc,
					     bridge_vdev,
					     mod_id);
		}
	}
}

qdf_export_symbol(dp_mlo_iter_ptnr_vdev);

void dp_mlo_debug_print_ptnr_info(struct dp_vdev *vdev)
{
	struct dp_vdev_be *be_vdev = NULL;
	struct dp_soc_be *be_soc = NULL;

	be_soc = dp_get_be_soc_from_dp_soc(vdev->pdev->soc);
	be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);

	DP_PRINT_STATS("self vdev is_bridge_vap = %s, mcast_primary = %s, vdev_id = %d, pdev_id = %d, chip_id = %d",
		       vdev->is_bridge_vdev ? "true" : "false",
		       be_vdev->mcast_primary ? "true" : "false",
		       vdev->vdev_id,
		       vdev->pdev->pdev_id,
		       dp_mlo_get_chip_id(vdev->pdev->soc));

	dp_mlo_iter_ptnr_vdev(be_soc, be_vdev,
			      dp_print_mlo_partner_list,
			      NULL, DP_MOD_ID_GENERIC_STATS,
			      DP_ALL_VDEV_ITER,
			      DP_VDEV_ITERATE_SKIP_SELF);
}
#endif

#ifdef WLAN_MCAST_MLO
struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
					      struct dp_vdev_be *be_vdev,
					      enum dp_mod_id mod_id)
{
	int i = 0;
	int j = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;
	struct dp_vdev *vdev = (struct dp_vdev *)be_vdev;

	if (!be_vdev->mlo_dev_ctxt)
		return NULL;

	if (be_vdev->mcast_primary) {
		if (dp_vdev_get_ref((struct dp_soc *)be_soc, vdev, mod_id) !=
					QDF_STATUS_SUCCESS)
			return NULL;

		return vdev;
	}

	for (i = 0; i < WLAN_MAX_MLO_CHIPS ; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		for (j = 0 ; j < WLAN_MAX_MLO_LINKS_PER_SOC ; j++) {
			struct dp_vdev *ptnr_vdev = NULL;
			struct dp_vdev_be *be_ptnr_vdev = NULL;

			ptnr_vdev = dp_vdev_get_ref_by_id(
					ptnr_soc,
					be_vdev->mlo_dev_ctxt->vdev_list[i][j],
					mod_id);
			if (!ptnr_vdev)
				continue;
			be_ptnr_vdev = dp_get_be_vdev_from_dp_vdev(ptnr_vdev);
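			/* Return with the reference held once the mcast
			 * primary partner vdev is found; the caller must
			 * release it.
			 */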
			if (be_ptnr_vdev->mcast_primary)
				return ptnr_vdev;
			dp_vdev_unref_delete(be_ptnr_vdev->vdev.pdev->soc,
					     &be_ptnr_vdev->vdev,
					     mod_id);
		}
	}
	return NULL;
}

qdf_export_symbol(dp_mlo_get_mcast_primary_vdev);
#endif

/**
 * dp_mlo_iter_ptnr_soc() - iterate through mlo soc list and call the callback
 * @be_soc: dp_soc_be pointer
 * @func: Function to be called for each soc
 * @arg: context to be passed to the callback
 *
 * Return: true if mlo is enabled, false if mlo is disabled
 */
bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
			  void *arg)
{
	int i = 0;
	struct dp_mlo_ctxt *dp_mlo = be_soc->ml_ctxt;

	if (!be_soc->mlo_enabled || !be_soc->ml_ctxt)
		return false;

	for (i = 0; i < WLAN_MAX_MLO_CHIPS ; i++) {
		struct dp_soc *ptnr_soc =
				dp_mlo_get_soc_ref_by_chip_id(dp_mlo, i);

		if (!ptnr_soc)
			continue;
		(*func)(ptnr_soc, arg, i);
	}

	return true;
}

qdf_export_symbol(dp_mlo_iter_ptnr_soc);

static inline uint64_t dp_mlo_get_mlo_ts_offset(struct dp_pdev_be *be_pdev)
{
	struct dp_soc *soc;
	struct dp_pdev *pdev;
	struct dp_soc_be *be_soc;
	uint32_t mlo_offset;

	pdev = &be_pdev->pdev;
	soc = pdev->soc;
	be_soc = dp_get_be_soc_from_dp_soc(soc);

	mlo_offset = be_soc->mlo_tstamp_offset;

	return mlo_offset;
}

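/*
 * The two helpers below return the difference between the MLO timestamp
 * offset and the per-pdev TSF2 / per-soc TQM deltas; Tx completion
 * processing can use these to translate hardware timestamps into the
 * common MLO time base.
 */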
int32_t dp_mlo_get_delta_tsf2_wrt_mlo_offset(struct dp_soc *soc,
					     uint8_t hw_link_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *ml_ctxt = be_soc->ml_ctxt;
	struct dp_pdev_be *be_pdev;
	int32_t delta_tsf2_mlo_offset;
	int32_t mlo_offset, delta_tsf2;

	if (!ml_ctxt)
		return 0;

	be_pdev = dp_mlo_get_be_pdev_from_link_id(ml_ctxt, hw_link_id);
	if (!be_pdev)
		return 0;

	mlo_offset = dp_mlo_get_mlo_ts_offset(be_pdev);
	delta_tsf2 = be_pdev->delta_tsf2;

	delta_tsf2_mlo_offset = mlo_offset - delta_tsf2;

	return delta_tsf2_mlo_offset;
}

int32_t dp_mlo_get_delta_tqm_wrt_mlo_offset(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	int32_t delta_tqm_mlo_offset;
	int32_t mlo_offset, delta_tqm;

	mlo_offset = be_soc->mlo_tstamp_offset;
	delta_tqm = be_soc->delta_tqm;

	delta_tqm_mlo_offset = mlo_offset - delta_tqm;

	return delta_tqm_mlo_offset;
}

#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * dp_umac_reset_update_partner_map() - Update Umac reset partner map
 * @mlo_ctx: DP ML context handle
 * @chip_id: chip id
 * @set: flag indicating whether to set or clear the bit
 *
 * Return: void
 */
static void dp_umac_reset_update_partner_map(struct dp_mlo_ctxt *mlo_ctx,
					     int chip_id, bool set)
{
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx =
						&mlo_ctx->grp_umac_reset_ctx;

	if (set)
		qdf_atomic_set_bit(chip_id, &grp_umac_reset_ctx->partner_map);
	else
		qdf_atomic_clear_bit(chip_id, &grp_umac_reset_ctx->partner_map);
}

QDF_STATUS dp_umac_reset_notify_asserted_soc(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for non-MLO SOC as well. Hence, return
		 * the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_update_partner_map(mlo_ctx, be_soc->mlo_chip_id, false);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_complete_umac_recovery() - Complete Umac reset session
 * @soc: dp soc handle
 *
 * Return: void
 */
void dp_umac_reset_complete_umac_recovery(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_alert("Umac reset was handled on soc %pK", soc);
		return;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	grp_umac_reset_ctx->umac_reset_in_progress = false;
	grp_umac_reset_ctx->is_target_recovery = false;
	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	grp_umac_reset_ctx->initiator_chip_id = 0;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_umac_reset_alert("Umac reset was handled on mlo group ctxt %pK",
			    mlo_ctx);
}

/**
 * dp_umac_reset_initiate_umac_recovery() - Initiate Umac reset session
 * @soc: dp soc handle
 * @umac_reset_ctx: Umac reset context
 * @rx_event: Rx event received
 * @is_target_recovery: Flag to indicate if it is triggered for target recovery
 *
 * Return: status
 */
QDF_STATUS dp_umac_reset_initiate_umac_recovery(struct dp_soc *soc,
				struct dp_soc_umac_reset_ctx *umac_reset_ctx,
				enum umac_reset_rx_event rx_event,
				bool is_target_recovery)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (!mlo_ctx)
		return dp_umac_reset_validate_n_update_state_machine_on_rx(
					umac_reset_ctx, rx_event,
					UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
					UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	if (grp_umac_reset_ctx->umac_reset_in_progress) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_E_INVAL;
	}

	status = dp_umac_reset_validate_n_update_state_machine_on_rx(
					umac_reset_ctx, rx_event,
					UMAC_RESET_STATE_WAIT_FOR_TRIGGER,
					UMAC_RESET_STATE_DO_TRIGGER_RECEIVED);

	if (status != QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return status;
	}

	grp_umac_reset_ctx->umac_reset_in_progress = true;
	grp_umac_reset_ctx->is_target_recovery = is_target_recovery;

	/* We don't wait for the 'Umac trigger' message from all socs;
	 * pre-populate the request and response maps with the full
	 * partner map so the session can proceed immediately.
	 */
	grp_umac_reset_ctx->request_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->response_map = grp_umac_reset_ctx->partner_map;
	grp_umac_reset_ctx->initiator_chip_id = dp_mlo_get_chip_id(soc);
	grp_umac_reset_ctx->umac_reset_count++;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_handle_action_cb() - Function to call action callback
 * @soc: dp soc handle
 * @umac_reset_ctx: Umac reset context
 * @action: Action to call the callback for
 *
 * Return: QDF_STATUS status
 */
QDF_STATUS
dp_umac_reset_handle_action_cb(struct dp_soc *soc,
			       struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			       enum umac_reset_action action)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_debug("MLO context is Null");
		goto handle;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->request_map);

	dp_umac_reset_debug("partner_map %u request_map %u",
			    grp_umac_reset_ctx->partner_map,
			    grp_umac_reset_ctx->request_map);

	/* Synchronization between mlo socs: if some partner socs have not
	 * yet requested this action, defer it by marking it pending and
	 * rescheduling the tasklet, so that it is retried once the
	 * remaining requests arrive.
	 */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->request_map)
			!= grp_umac_reset_ctx->partner_map) {
		struct hif_softc *hif_sc = HIF_GET_SOFTC(soc->hif_handle);
		struct hif_umac_reset_ctx *hif_umac_reset_ctx;

		if (!hif_sc) {
			hif_err("scn is null");
			qdf_assert_always(0);
			return QDF_STATUS_E_FAILURE;
		}

		hif_umac_reset_ctx = &hif_sc->umac_reset_ctx;

		/* Mark the action as pending */
		umac_reset_ctx->pending_action = action;
		/* Reschedule the tasklet and exit */
		tasklet_hi_schedule(&hif_umac_reset_ctx->intr_tq);
		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

		return QDF_STATUS_SUCCESS;
	}

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
	umac_reset_ctx->pending_action = UMAC_RESET_ACTION_NONE;

handle:
	if (!umac_reset_ctx->rx_actions.cb[action]) {
		dp_umac_reset_err("rx callback is NULL");
		return QDF_STATUS_E_FAILURE;
	}

	return umac_reset_ctx->rx_actions.cb[action](soc);
}

/**
 * dp_umac_reset_post_tx_cmd() - Iterate partner socs and post Tx command
 * @umac_reset_ctx: UMAC reset context
 * @tx_cmd: Tx command to be posted
 *
 * Return: QDF status of operation
 */
QDF_STATUS
dp_umac_reset_post_tx_cmd(struct dp_soc_umac_reset_ctx *umac_reset_ctx,
			  enum umac_reset_tx_cmd tx_cmd)
{
	struct dp_soc *soc = container_of(umac_reset_ctx, struct dp_soc,
					  umac_reset_ctx);
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx) {
		dp_umac_reset_post_tx_cmd_via_shmem(soc, &tx_cmd, 0);
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	qdf_atomic_set_bit(dp_mlo_get_chip_id(soc),
			   &grp_umac_reset_ctx->response_map);

	/* Synchronization between mlo socs: post the Tx command only after
	 * every partner soc has recorded its response.
	 */
	if ((grp_umac_reset_ctx->partner_map & grp_umac_reset_ctx->response_map)
				!= grp_umac_reset_ctx->partner_map) {
		dp_umac_reset_debug(
			"Response(s) pending : expected map %u current map %u",
			grp_umac_reset_ctx->partner_map,
			grp_umac_reset_ctx->response_map);

		qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);
		return QDF_STATUS_SUCCESS;
	}

	dp_umac_reset_debug(
		"All responses received: expected map %u current map %u",
		grp_umac_reset_ctx->partner_map,
		grp_umac_reset_ctx->response_map);

	grp_umac_reset_ctx->response_map = 0;
	grp_umac_reset_ctx->request_map = 0;
	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	dp_mlo_iter_ptnr_soc(be_soc, &dp_umac_reset_post_tx_cmd_via_shmem,
			     &tx_cmd);

	if (tx_cmd == UMAC_RESET_TX_CMD_POST_RESET_COMPLETE_DONE)
		dp_umac_reset_complete_umac_recovery(soc);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_umac_reset_initiator_check() - Check if soc is the Umac reset initiator
 * @soc: dp soc handle
 *
 * Return: true if the soc is initiator or false otherwise
 */
bool dp_umac_reset_initiator_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return true;

	return (mlo_ctx->grp_umac_reset_ctx.initiator_chip_id ==
				dp_mlo_get_chip_id(soc));
}

/**
 * dp_umac_reset_target_recovery_check() - Check if this is for target recovery
 * @soc: dp soc handle
 *
 * Return: true if the session is for target recovery or false otherwise
 */
bool dp_umac_reset_target_recovery_check(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return mlo_ctx->grp_umac_reset_ctx.is_target_recovery;
}

/**
 * dp_umac_reset_is_soc_ignored() - Check if this soc is to be ignored
 * @soc: dp soc handle
 *
 * Return: true if the soc is ignored or false otherwise
 */
bool dp_umac_reset_is_soc_ignored(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;

	if (!mlo_ctx)
		return false;

	return !qdf_atomic_test_bit(dp_mlo_get_chip_id(soc),
				    &mlo_ctx->grp_umac_reset_ctx.partner_map);
}

QDF_STATUS dp_mlo_umac_reset_stats_print(struct dp_soc *soc)
{
	struct dp_mlo_ctxt *mlo_ctx;
	struct dp_soc_be *be_soc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (!be_soc) {
		dp_umac_reset_err("null be_soc");
		return QDF_STATUS_E_NULL_VALUE;
	}

	mlo_ctx = be_soc->ml_ctxt;
	if (!mlo_ctx) {
		/* This API can be called for non-MLO SOC as well. Hence, return
		 * the status as success when mlo_ctx is NULL.
		 */
		return QDF_STATUS_SUCCESS;
	}

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;

	DP_UMAC_RESET_PRINT_STATS("MLO UMAC RESET stats\n"
		  "\t\tPartner map                   :%x\n"
		  "\t\tRequest map                   :%x\n"
		  "\t\tResponse map                  :%x\n"
		  "\t\tIs target recovery            :%d\n"
		  "\t\tIs Umac reset inprogress      :%d\n"
		  "\t\tNumber of UMAC reset triggered:%d\n"
		  "\t\tInitiator chip ID             :%d\n",
		  grp_umac_reset_ctx->partner_map,
		  grp_umac_reset_ctx->request_map,
		  grp_umac_reset_ctx->response_map,
		  grp_umac_reset_ctx->is_target_recovery,
		  grp_umac_reset_ctx->umac_reset_in_progress,
		  grp_umac_reset_ctx->umac_reset_count,
		  grp_umac_reset_ctx->initiator_chip_id);

	return QDF_STATUS_SUCCESS;
}

enum cdp_umac_reset_state
dp_get_umac_reset_in_progress_state(struct cdp_soc_t *psoc)
{
	struct dp_soc_umac_reset_ctx *umac_reset_ctx;
	struct dp_soc *soc = (struct dp_soc *)psoc;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	struct dp_soc_be *be_soc = NULL;
	struct dp_mlo_ctxt *mlo_ctx = NULL;
	enum cdp_umac_reset_state umac_reset_is_inprogress;

	if (!soc) {
		dp_umac_reset_err("DP SOC is null");
		return CDP_UMAC_RESET_INVALID_STATE;
	}

	umac_reset_ctx = &soc->umac_reset_ctx;

	be_soc = dp_get_be_soc_from_dp_soc(soc);
	if (be_soc)
		mlo_ctx = be_soc->ml_ctxt;

	if (mlo_ctx) {
		grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
		umac_reset_is_inprogress =
			grp_umac_reset_ctx->umac_reset_in_progress;
	} else {
		umac_reset_is_inprogress = (umac_reset_ctx->current_state !=
					    UMAC_RESET_STATE_WAIT_FOR_TRIGGER);
	}

	if (umac_reset_is_inprogress)
		return CDP_UMAC_RESET_IN_PROGRESS;

	/* Check if the umac reset was in progress during the buffer
	 * window.
	 */
	umac_reset_is_inprogress =
		((qdf_get_log_timestamp_usecs() -
		  umac_reset_ctx->ts.post_reset_complete_done) <=
		 (wlan_cfg_get_umac_reset_buffer_window_ms(soc->wlan_cfg_ctx) *
		  1000));

	return (umac_reset_is_inprogress ?
			CDP_UMAC_RESET_IN_PROGRESS_DURING_BUFFER_WINDOW :
			CDP_UMAC_RESET_NOT_IN_PROGRESS);
}

/**
 * dp_get_global_tx_desc_cleanup_flag() - Get cleanup needed flag
 * @soc: dp soc handle
 *
 * Return: cleanup needed/ not needed
 */
bool dp_get_global_tx_desc_cleanup_flag(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;
	bool flag;

	if (!mlo_ctx)
		return true;

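	/* Test-and-set under the group lock: only the first soc in the MLO
	 * group to read the flag sees 'cleanup needed' and performs the
	 * global Tx descriptor pool cleanup exactly once.
	 */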
	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	qdf_spin_lock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	flag = grp_umac_reset_ctx->tx_desc_pool_cleaned;
	if (!flag)
		grp_umac_reset_ctx->tx_desc_pool_cleaned = true;

	qdf_spin_unlock_bh(&grp_umac_reset_ctx->grp_ctx_lock);

	return !flag;
}

/**
 * dp_reset_global_tx_desc_cleanup_flag() - Reset cleanup needed flag
 * @soc: dp soc handle
 *
 * Return: None
 */
void dp_reset_global_tx_desc_cleanup_flag(struct dp_soc *soc)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctx = be_soc->ml_ctxt;
	struct dp_soc_mlo_umac_reset_ctx *grp_umac_reset_ctx;

	if (!mlo_ctx)
		return;

	grp_umac_reset_ctx = &mlo_ctx->grp_umac_reset_ctx;
	grp_umac_reset_ctx->tx_desc_pool_cleaned = false;
}
#endif

struct dp_soc *
dp_get_soc_by_chip_id_be(struct dp_soc *soc, uint8_t chip_id)
{
	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
	struct dp_mlo_ctxt *mlo_ctxt = be_soc->ml_ctxt;
	struct dp_soc *partner_soc;

	if (!be_soc->mlo_enabled || !mlo_ctxt)
		return soc;

	if (be_soc->mlo_chip_id == chip_id)
		return soc;

	partner_soc = dp_mlo_get_soc_ref_by_chip_id(mlo_ctxt, chip_id);
	return partner_soc;
}