1 /*
2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19 #ifndef _DP_PEER_H_
20 #define _DP_PEER_H_
21
22 #include <qdf_types.h>
23 #include <qdf_lock.h>
24 #include "dp_types.h"
25 #include "dp_internal.h"
26
27 #ifdef DUMP_REO_QUEUE_INFO_IN_DDR
28 #include "hal_reo.h"
29 #endif
30
31 #define DP_INVALID_PEER_ID 0xffff
32
33 #define DP_PEER_MAX_MEC_IDX 1024 /* maximum index for MEC table */
34 #define DP_PEER_MAX_MEC_ENTRY 4096 /* maximum MEC entries in MEC table */
35
36 #define DP_FW_PEER_STATS_CMP_TIMEOUT_MSEC 5000
37
38 #define DP_PEER_HASH_LOAD_MULT 2
39 #define DP_PEER_HASH_LOAD_SHIFT 0
40
41 /* Threshold for peer's cached buf queue beyond which frames are dropped */
42 #define DP_RX_CACHED_BUFQ_THRESH 64
43
44 #define dp_peer_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_PEER, params)
45 #define dp_peer_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_PEER, params)
46 #define dp_peer_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_PEER, params)
47 #define dp_peer_info(params...) \
48 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_PEER, ## params)
49 #define dp_peer_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_PEER, params)
50
51 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
52 /**
53 * enum dp_bands - WiFi Band
54 *
55 * @DP_BAND_INVALID: Invalid band
56 * @DP_BAND_2GHZ: 2GHz link
57 * @DP_BAND_5GHZ: 5GHz link
58 * @DP_BAND_6GHZ: 6GHz link
59 * @DP_BAND_UNKNOWN: Unknown band
60 */
61 enum dp_bands {
62 DP_BAND_INVALID = 0,
63 DP_BAND_2GHZ = 1,
64 DP_BAND_5GHZ = 2,
65 DP_BAND_6GHZ = 3,
66 DP_BAND_UNKNOWN = 4,
67 };
68
69 /**
70 * dp_freq_to_band() - Convert frequency to band
71 * @freq: peer frequency
72 *
73 * Return: band for input frequency
74 */
75 enum dp_bands dp_freq_to_band(qdf_freq_t freq);
76 #endif
77
78 void check_free_list_for_invalid_flush(struct dp_soc *soc);
79
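/*
 * The three inline helpers below maintain circular debug logs of REO
 * queue descriptor (qdesc) addresses: one for allocations, one for frees
 * and one for REO queue-reference writes. Each log is sized by
 * wlan_cfg_ctx->qref_control_size (a value of 0 disables logging) and the
 * write index wraps around once it reaches that size; the recorded
 * entries are presumably what check_free_list_for_invalid_flush()
 * inspects when hunting for invalid qdesc flushes.
 */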
80 static inline
81 void add_entry_alloc_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid,
82 struct dp_peer *peer, void *hw_qdesc_vaddr)
83 {
84 uint32_t max_list_size;
85 unsigned long curr_ts = qdf_get_system_timestamp();
86 	uint32_t qref_index = soc->alloc_addr_list_idx;
87
88 max_list_size = soc->wlan_cfg_ctx->qref_control_size;
89
90 if (max_list_size == 0)
91 return;
92
93 soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_paddr =
94 rx_tid->hw_qdesc_paddr;
95 soc->list_qdesc_addr_alloc[qref_index].ts_qdesc_mem_hdl = curr_ts;
96 soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_align =
97 hw_qdesc_vaddr;
98 soc->list_qdesc_addr_alloc[qref_index].hw_qdesc_vaddr_unalign =
99 rx_tid->hw_qdesc_vaddr_unaligned;
100 soc->list_qdesc_addr_alloc[qref_index].peer_id = peer->peer_id;
101 soc->list_qdesc_addr_alloc[qref_index].tid = rx_tid->tid;
102 soc->alloc_addr_list_idx++;
103
104 if (soc->alloc_addr_list_idx == max_list_size)
105 soc->alloc_addr_list_idx = 0;
106 }
107
108 static inline
109 void add_entry_free_list(struct dp_soc *soc, struct dp_rx_tid *rx_tid)
110 {
111 uint32_t max_list_size;
112 unsigned long curr_ts = qdf_get_system_timestamp();
113 uint32_t qref_index = soc->free_addr_list_idx;
114
115 max_list_size = soc->wlan_cfg_ctx->qref_control_size;
116
117 if (max_list_size == 0)
118 return;
119
120 soc->list_qdesc_addr_free[qref_index].ts_qdesc_mem_hdl = curr_ts;
121 soc->list_qdesc_addr_free[qref_index].hw_qdesc_paddr =
122 rx_tid->hw_qdesc_paddr;
123 soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_align =
124 rx_tid->hw_qdesc_vaddr_aligned;
125 soc->list_qdesc_addr_free[qref_index].hw_qdesc_vaddr_unalign =
126 rx_tid->hw_qdesc_vaddr_unaligned;
127 soc->free_addr_list_idx++;
128
129 if (soc->free_addr_list_idx == max_list_size)
130 soc->free_addr_list_idx = 0;
131 }
132
133 static inline
134 void add_entry_write_list(struct dp_soc *soc, struct dp_peer *peer,
135 uint32_t tid)
136 {
137 uint32_t max_list_size;
138 unsigned long curr_ts = qdf_get_system_timestamp();
139
140 max_list_size = soc->wlan_cfg_ctx->qref_control_size;
141
142 if (max_list_size == 0)
143 return;
144
145 soc->reo_write_list[soc->write_paddr_list_idx].ts_qaddr_del = curr_ts;
146 soc->reo_write_list[soc->write_paddr_list_idx].peer_id = peer->peer_id;
147 soc->reo_write_list[soc->write_paddr_list_idx].paddr =
148 peer->rx_tid[tid].hw_qdesc_paddr;
149 soc->reo_write_list[soc->write_paddr_list_idx].tid = tid;
150 soc->write_paddr_list_idx++;
151
152 if (soc->write_paddr_list_idx == max_list_size)
153 soc->write_paddr_list_idx = 0;
154 }
155
156 #ifdef REO_QDESC_HISTORY
157 enum reo_qdesc_event_type {
158 REO_QDESC_UPDATE_CB = 0,
159 REO_QDESC_FREE,
160 };
161
162 struct reo_qdesc_event {
163 qdf_dma_addr_t qdesc_addr;
164 uint64_t ts;
165 enum reo_qdesc_event_type type;
166 uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
167 };
168 #endif
169
170 struct ast_del_ctxt {
171 bool age;
172 int del_count;
173 };
174
175 #ifdef QCA_SUPPORT_WDS_EXTENDED
176 /**
177 * dp_peer_is_wds_ext_peer() - peer is WDS_EXT peer
178 *
179 * @peer: DP peer context
180 *
181 * This API checks whether the peer is WDS_EXT peer or not
182 *
183  * Return: true if the peer is a wds_ext peer, else false
184 */
185 static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
186 {
187 return qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT, &peer->wds_ext.init);
188 }
189 #else
190 static inline bool dp_peer_is_wds_ext_peer(struct dp_txrx_peer *peer)
191 {
192 return false;
193 }
194 #endif
195
196 typedef void dp_peer_iter_func(struct dp_soc *soc, struct dp_peer *peer,
197 void *arg);
198 /**
199 * dp_peer_unref_delete() - unref and delete peer
200 * @peer: Datapath peer handle
201 * @id: ID of module releasing reference
202 *
203 */
204 void dp_peer_unref_delete(struct dp_peer *peer, enum dp_mod_id id);
205
206 /**
207 * dp_txrx_peer_unref_delete() - unref and delete peer
208 * @handle: Datapath txrx ref handle
209 * @id: Module ID of the caller
210 *
211 */
212 void dp_txrx_peer_unref_delete(dp_txrx_ref_handle handle, enum dp_mod_id id);
213
214 /**
215 * dp_peer_find_hash_find() - returns legacy or mlo link peer from
216 * peer_hash_table matching vdev_id and mac_address
217 * @soc: soc handle
218 * @peer_mac_addr: peer mac address
219 * @mac_addr_is_aligned: is mac addr aligned
220 * @vdev_id: vdev_id
221 * @mod_id: id of module requesting reference
222 *
223  * Return: peer on success
224  *         NULL on failure
225 */
226 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
227 uint8_t *peer_mac_addr,
228 int mac_addr_is_aligned,
229 uint8_t vdev_id,
230 enum dp_mod_id mod_id);
231
232 /**
233  * dp_peer_find_by_id_valid() - check if peer exists for given id
234  * @soc: core DP soc context
235  * @peer_id: peer id from which peer object can be retrieved
236  *
237  * Return: true if peer exists, false otherwise
238 */
239 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);
240
241 /**
242 * dp_peer_get_ref() - Returns peer object given the peer id
243 *
244 * @soc: core DP soc context
245 * @peer: DP peer
246 * @mod_id: id of module requesting the reference
247 *
248 * Return: QDF_STATUS_SUCCESS if reference held successfully
249 * else QDF_STATUS_E_INVAL
250 */
251 static inline
252 QDF_STATUS dp_peer_get_ref(struct dp_soc *soc,
253 struct dp_peer *peer,
254 enum dp_mod_id mod_id)
255 {
256 if (!qdf_atomic_inc_not_zero(&peer->ref_cnt))
257 return QDF_STATUS_E_INVAL;
258
259 if (mod_id > DP_MOD_ID_RX)
260 qdf_atomic_inc(&peer->mod_refs[mod_id]);
261
262 return QDF_STATUS_SUCCESS;
263 }
264
265 /**
266 * __dp_peer_get_ref_by_id() - Returns peer object given the peer id
267 *
268 * @soc: core DP soc context
269  * @peer_id: peer id from which peer object can be retrieved
270 * @mod_id: module id
271 *
272 * Return: struct dp_peer*: Pointer to DP peer object
273 */
274 static inline struct dp_peer *
275 __dp_peer_get_ref_by_id(struct dp_soc *soc,
276 uint16_t peer_id,
277 enum dp_mod_id mod_id)
278
279 {
280 struct dp_peer *peer;
281
282 qdf_spin_lock_bh(&soc->peer_map_lock);
283 peer = (peer_id >= soc->max_peer_id) ? NULL :
284 soc->peer_id_to_obj_map[peer_id];
285 if (!peer ||
286 (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
287 qdf_spin_unlock_bh(&soc->peer_map_lock);
288 return NULL;
289 }
290
291 qdf_spin_unlock_bh(&soc->peer_map_lock);
292 return peer;
293 }
294
295 /**
296 * dp_peer_get_ref_by_id() - Returns peer object given the peer id
297 * if peer state is active
298 *
299 * @soc: core DP soc context
300  * @peer_id: peer id from which peer object can be retrieved
301 * @mod_id: ID of module requesting reference
302 *
303 * Return: struct dp_peer*: Pointer to DP peer object
304 */
305 static inline
306 struct dp_peer *dp_peer_get_ref_by_id(struct dp_soc *soc,
307 uint16_t peer_id,
308 enum dp_mod_id mod_id)
309 {
310 struct dp_peer *peer;
311
312 qdf_spin_lock_bh(&soc->peer_map_lock);
313 peer = (peer_id >= soc->max_peer_id) ? NULL :
314 soc->peer_id_to_obj_map[peer_id];
315
316 if (!peer || peer->peer_state >= DP_PEER_STATE_LOGICAL_DELETE ||
317 (dp_peer_get_ref(soc, peer, mod_id) != QDF_STATUS_SUCCESS)) {
318 qdf_spin_unlock_bh(&soc->peer_map_lock);
319 return NULL;
320 }
321
322 qdf_spin_unlock_bh(&soc->peer_map_lock);
323
324 return peer;
325 }
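/*
 * Illustrative usage sketch (not taken from this file): a module that only
 * has a peer_id pairs dp_peer_get_ref_by_id() with dp_peer_unref_delete()
 * using the same module id, so that every reference taken here is released
 * exactly once:
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);
 *	if (!peer)
 *		return QDF_STATUS_E_INVAL;
 *	... use peer under the held reference ...
 *	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */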
326
327 /**
328 * dp_txrx_peer_get_ref_by_id() - Returns txrx peer object given the peer id
329 *
330 * @soc: core DP soc context
331  * @peer_id: peer id from which peer object can be retrieved
332 * @handle: reference handle
333 * @mod_id: ID of module requesting reference
334 *
335 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
336 */
337 static inline struct dp_txrx_peer *
338 dp_txrx_peer_get_ref_by_id(struct dp_soc *soc,
339 uint16_t peer_id,
340 dp_txrx_ref_handle *handle,
341 enum dp_mod_id mod_id)
342
343 {
344 struct dp_peer *peer;
345
346 peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
347 if (!peer)
348 return NULL;
349
350 if (!peer->txrx_peer) {
351 dp_peer_unref_delete(peer, mod_id);
352 return NULL;
353 }
354
355 *handle = (dp_txrx_ref_handle)peer;
356 return peer->txrx_peer;
357 }
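/*
 * Illustrative usage sketch (assumption, not part of this header): the
 * txrx peer returned here stays valid only while the dp_txrx_ref_handle
 * written into @handle is held, and it is that handle which is released:
 *
 *	dp_txrx_ref_handle ref = NULL;
 *	struct dp_txrx_peer *txrx_peer;
 *
 *	txrx_peer = dp_txrx_peer_get_ref_by_id(soc, peer_id, &ref,
 *					       DP_MOD_ID_CDP);
 *	if (!txrx_peer)
 *		return;
 *	... read or update txrx_peer stats ...
 *	dp_txrx_peer_unref_delete(ref, DP_MOD_ID_CDP);
 */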
358
359 #ifdef PEER_CACHE_RX_PKTS
360 /**
361 * dp_rx_flush_rx_cached() - flush cached rx frames
362 * @peer: peer
363 * @drop: set flag to drop frames
364 *
365 * Return: None
366 */
367 void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
368 #else
369 static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
370 {
371 }
372 #endif
373
374 static inline void
375 dp_clear_peer_internal(struct dp_soc *soc, struct dp_peer *peer)
376 {
377 qdf_spin_lock_bh(&peer->peer_info_lock);
378 peer->state = OL_TXRX_PEER_STATE_DISC;
379 qdf_spin_unlock_bh(&peer->peer_info_lock);
380
381 dp_rx_flush_rx_cached(peer, true);
382 }
383
384 /**
385 * dp_vdev_iterate_peer() - API to iterate through vdev peer list
386 *
387 * @vdev: DP vdev context
388 * @func: function to be called for each peer
389 * @arg: argument need to be passed to func
390 * @mod_id: module_id
391 *
392 * Return: void
393 */
394 static inline void
395 dp_vdev_iterate_peer(struct dp_vdev *vdev, dp_peer_iter_func *func, void *arg,
396 enum dp_mod_id mod_id)
397 {
398 struct dp_peer *peer;
399 struct dp_peer *tmp_peer;
400 struct dp_soc *soc = NULL;
401
402 if (!vdev || !vdev->pdev || !vdev->pdev->soc)
403 return;
404
405 soc = vdev->pdev->soc;
406
407 qdf_spin_lock_bh(&vdev->peer_list_lock);
408 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
409 peer_list_elem,
410 tmp_peer) {
411 if (dp_peer_get_ref(soc, peer, mod_id) ==
412 QDF_STATUS_SUCCESS) {
413 (*func)(soc, peer, arg);
414 dp_peer_unref_delete(peer, mod_id);
415 }
416 }
417 qdf_spin_unlock_bh(&vdev->peer_list_lock);
418 }
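/*
 * Illustrative callback sketch (dp_peer_count_cb is a hypothetical helper,
 * not defined in this file): a dp_peer_iter_func implementation is invoked
 * with a reference already held by the iterator, so it only operates on
 * the peer and must not drop that reference itself:
 *
 *	static void dp_peer_count_cb(struct dp_soc *soc, struct dp_peer *peer,
 *				     void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t count = 0;
 *
 *	dp_vdev_iterate_peer(vdev, dp_peer_count_cb, &count, DP_MOD_ID_CDP);
 */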
419
420 /**
421 * dp_pdev_iterate_peer() - API to iterate through all peers of pdev
422 *
423 * @pdev: DP pdev context
424 * @func: function to be called for each peer
425 * @arg: argument need to be passed to func
426 * @mod_id: module_id
427 *
428 * Return: void
429 */
430 static inline void
431 dp_pdev_iterate_peer(struct dp_pdev *pdev, dp_peer_iter_func *func, void *arg,
432 enum dp_mod_id mod_id)
433 {
434 struct dp_vdev *vdev;
435
436 if (!pdev)
437 return;
438
439 qdf_spin_lock_bh(&pdev->vdev_list_lock);
440 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev)
441 dp_vdev_iterate_peer(vdev, func, arg, mod_id);
442 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
443 }
444
445 /**
446 * dp_soc_iterate_peer() - API to iterate through all peers of soc
447 *
448 * @soc: DP soc context
449 * @func: function to be called for each peer
450 * @arg: argument need to be passed to func
451 * @mod_id: module_id
452 *
453 * Return: void
454 */
455 static inline void
456 dp_soc_iterate_peer(struct dp_soc *soc, dp_peer_iter_func *func, void *arg,
457 enum dp_mod_id mod_id)
458 {
459 struct dp_pdev *pdev;
460 int i;
461
462 if (!soc)
463 return;
464
465 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
466 pdev = soc->pdev_list[i];
467 dp_pdev_iterate_peer(pdev, func, arg, mod_id);
468 }
469 }
470
471 /**
472 * dp_vdev_iterate_peer_lock_safe() - API to iterate through vdev list
473 *
474  * This API caches the peers in locally allocated memory and calls the
475  * iterate function outside the lock.
476  *
477  * As this API allocates new memory, it is suggested to use it
478  * only when the lock cannot be held.
479 *
480 * @vdev: DP vdev context
481 * @func: function to be called for each peer
482 * @arg: argument need to be passed to func
483 * @mod_id: module_id
484 *
485 * Return: void
486 */
487 static inline void
488 dp_vdev_iterate_peer_lock_safe(struct dp_vdev *vdev,
489 dp_peer_iter_func *func,
490 void *arg,
491 enum dp_mod_id mod_id)
492 {
493 struct dp_peer *peer;
494 struct dp_peer *tmp_peer;
495 struct dp_soc *soc = NULL;
496 struct dp_peer **peer_array = NULL;
497 int i = 0;
498 uint32_t num_peers = 0;
499
500 if (!vdev || !vdev->pdev || !vdev->pdev->soc)
501 return;
502
503 num_peers = vdev->num_peers;
504
505 soc = vdev->pdev->soc;
506
507 peer_array = qdf_mem_malloc(num_peers * sizeof(struct dp_peer *));
508 if (!peer_array)
509 return;
510
511 qdf_spin_lock_bh(&vdev->peer_list_lock);
512 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
513 peer_list_elem,
514 tmp_peer) {
515 if (i >= num_peers)
516 break;
517
518 if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
519 peer_array[i] = peer;
520 i = (i + 1);
521 }
522 }
523 qdf_spin_unlock_bh(&vdev->peer_list_lock);
524
525 for (i = 0; i < num_peers; i++) {
526 peer = peer_array[i];
527
528 if (!peer)
529 continue;
530
531 (*func)(soc, peer, arg);
532 dp_peer_unref_delete(peer, mod_id);
533 }
534
535 qdf_mem_free(peer_array);
536 }
537
538 /**
539 * dp_pdev_iterate_peer_lock_safe() - API to iterate through all peers of pdev
540 *
541  * This API caches the peers in locally allocated memory and calls the
542  * iterate function outside the lock.
543  *
544  * As this API allocates new memory, it is suggested to use it
545  * only when the lock cannot be held.
546 *
547 * @pdev: DP pdev context
548 * @func: function to be called for each peer
549 * @arg: argument need to be passed to func
550 * @mod_id: module_id
551 *
552 * Return: void
553 */
554 static inline void
555 dp_pdev_iterate_peer_lock_safe(struct dp_pdev *pdev,
556 dp_peer_iter_func *func,
557 void *arg,
558 enum dp_mod_id mod_id)
559 {
560 struct dp_peer *peer;
561 struct dp_peer *tmp_peer;
562 struct dp_soc *soc = NULL;
563 struct dp_vdev *vdev = NULL;
564 struct dp_peer **peer_array[DP_PDEV_MAX_VDEVS] = {0};
565 int i = 0;
566 int j = 0;
567 uint32_t num_peers[DP_PDEV_MAX_VDEVS] = {0};
568
569 if (!pdev || !pdev->soc)
570 return;
571
572 soc = pdev->soc;
573
574 qdf_spin_lock_bh(&pdev->vdev_list_lock);
575 DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
576 num_peers[i] = vdev->num_peers;
577 peer_array[i] = qdf_mem_malloc(num_peers[i] *
578 sizeof(struct dp_peer *));
579 if (!peer_array[i])
580 break;
581
582 qdf_spin_lock_bh(&vdev->peer_list_lock);
583 TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
584 peer_list_elem,
585 tmp_peer) {
586 if (j >= num_peers[i])
587 break;
588
589 if (dp_peer_get_ref(soc, peer, mod_id) ==
590 QDF_STATUS_SUCCESS) {
591 peer_array[i][j] = peer;
592
593 j = (j + 1);
594 }
595 }
596 qdf_spin_unlock_bh(&vdev->peer_list_lock);
597 i = (i + 1);
598 }
599 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
600
601 for (i = 0; i < DP_PDEV_MAX_VDEVS; i++) {
602 if (!peer_array[i])
603 break;
604
605 for (j = 0; j < num_peers[i]; j++) {
606 peer = peer_array[i][j];
607
608 if (!peer)
609 continue;
610
611 (*func)(soc, peer, arg);
612 dp_peer_unref_delete(peer, mod_id);
613 }
614
615 qdf_mem_free(peer_array[i]);
616 }
617 }
618
619 /**
620 * dp_soc_iterate_peer_lock_safe() - API to iterate through all peers of soc
621 *
622  * This API caches the peers in locally allocated memory and calls the
623  * iterate function outside the lock.
624  *
625  * As this API allocates new memory, it is suggested to use it
626  * only when the lock cannot be held.
627 *
628 * @soc: DP soc context
629 * @func: function to be called for each peer
630 * @arg: argument need to be passed to func
631 * @mod_id: module_id
632 *
633 * Return: void
634 */
635 static inline void
636 dp_soc_iterate_peer_lock_safe(struct dp_soc *soc,
637 dp_peer_iter_func *func,
638 void *arg,
639 enum dp_mod_id mod_id)
640 {
641 struct dp_pdev *pdev;
642 int i;
643
644 if (!soc)
645 return;
646
647 for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
648 pdev = soc->pdev_list[i];
649 dp_pdev_iterate_peer_lock_safe(pdev, func, arg, mod_id);
650 }
651 }
652
653 #ifdef DP_PEER_STATE_DEBUG
654 #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
655 do { \
656 if (!(_condition)) { \
657 dp_alert("Invalid state shift from %u to %u peer " \
658 QDF_MAC_ADDR_FMT, \
659 (_peer)->peer_state, (_new_state), \
660 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
661 QDF_ASSERT(0); \
662 } \
663 } while (0)
664
665 #else
666 #define DP_PEER_STATE_ASSERT(_peer, _new_state, _condition) \
667 do { \
668 if (!(_condition)) { \
669 dp_alert("Invalid state shift from %u to %u peer " \
670 QDF_MAC_ADDR_FMT, \
671 (_peer)->peer_state, (_new_state), \
672 QDF_MAC_ADDR_REF((_peer)->mac_addr.raw)); \
673 } \
674 } while (0)
675 #endif
676
677 /**
678 * dp_peer_state_cmp() - compare dp peer state
679 *
680 * @peer: DP peer
681 * @state: state
682 *
683 * Return: true if state matches with peer state
684 * false if it does not match
685 */
686 static inline bool
687 dp_peer_state_cmp(struct dp_peer *peer,
688 enum dp_peer_state state)
689 {
690 bool is_status_equal = false;
691
692 qdf_spin_lock_bh(&peer->peer_state_lock);
693 is_status_equal = (peer->peer_state == state);
694 qdf_spin_unlock_bh(&peer->peer_state_lock);
695
696 return is_status_equal;
697 }
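/*
 * Illustrative usage sketch: dp_peer_state_cmp() is the lock-protected way
 * to test the state, for example to skip peers that are already logically
 * deleted (the dp_peer_state values come from dp_types.h):
 *
 *	if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE))
 *		return;
 */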
698
699 /**
700 * dp_print_ast_stats() - Dump AST table contents
701 * @soc: Datapath soc handle
702 *
703 * Return: void
704 */
705 void dp_print_ast_stats(struct dp_soc *soc);
706
707 /**
708 * dp_rx_peer_map_handler() - handle peer map event from firmware
709 * @soc: generic soc handle
710 * @peer_id: peer_id from firmware
711 * @hw_peer_id: ast index for this peer
712 * @vdev_id: vdev ID
713 * @peer_mac_addr: mac address of the peer
714 * @ast_hash: ast hash value
715 * @is_wds: flag to indicate peer map event for WDS ast entry
716 *
717 * associate the peer_id that firmware provided with peer entry
718 * and update the ast table in the host with the hw_peer_id.
719 *
720 * Return: QDF_STATUS code
721 */
722
723 QDF_STATUS dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
724 uint16_t hw_peer_id, uint8_t vdev_id,
725 uint8_t *peer_mac_addr, uint16_t ast_hash,
726 uint8_t is_wds);
727
728 /**
729 * dp_rx_peer_unmap_handler() - handle peer unmap event from firmware
730 * @soc: generic soc handle
731 * @peer_id: peer_id from firmware
732 * @vdev_id: vdev ID
733 * @peer_mac_addr: mac address of the peer or wds entry
734 * @is_wds: flag to indicate peer map event for WDS ast entry
735 * @free_wds_count: number of wds entries freed by FW with peer delete
736 *
737 * Return: none
738 */
739 void dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
740 uint8_t vdev_id, uint8_t *peer_mac_addr,
741 uint8_t is_wds, uint32_t free_wds_count);
742
743 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
744 /**
745 * dp_rx_peer_ext_evt() - handle peer extended event from firmware
746 * @soc: DP soc handle
747 * @info: extended evt info
748 *
749 *
750 * Return: QDF_STATUS
751 */
752
753 QDF_STATUS
754 dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info);
755 #endif
756 #ifdef DP_RX_UDP_OVER_PEER_ROAM
757 /**
758 * dp_rx_reset_roaming_peer() - Reset the roamed peer in vdev
759 * @soc: dp soc pointer
760 * @vdev_id: vdev id
761 * @peer_mac_addr: mac address of the peer
762 *
763 * This function resets the roamed peer auth status and mac address
764 * after peer map indication of same peer is received from firmware.
765 *
766 * Return: None
767 */
768 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
769 uint8_t *peer_mac_addr);
770 #else
771 static inline void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
772 uint8_t *peer_mac_addr)
773 {
774 }
775 #endif
776
777 #ifdef WLAN_FEATURE_11BE_MLO
778 /**
779 * dp_rx_mlo_peer_map_handler() - handle MLO peer map event from firmware
780 * @soc: generic soc handle
781 * @peer_id: ML peer_id from firmware
782 * @peer_mac_addr: mac address of the peer
783 * @mlo_flow_info: MLO AST flow info
784 * @mlo_link_info: MLO link info
785 *
786 * associate the ML peer_id that firmware provided with peer entry
787 * and update the ast table in the host with the hw_peer_id.
788 *
789 * Return: QDF_STATUS code
790 */
791 QDF_STATUS
792 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
793 uint8_t *peer_mac_addr,
794 struct dp_mlo_flow_override_info *mlo_flow_info,
795 struct dp_mlo_link_info *mlo_link_info);
796
797 /**
798 * dp_rx_mlo_peer_unmap_handler() - handle MLO peer unmap event from firmware
799 * @soc: generic soc handle
800 * @peer_id: peer_id from firmware
801 *
802 * Return: none
803 */
804 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id);
805 #endif
806
807 void dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
808 enum cdp_sec_type sec_type, int is_unicast,
809 u_int32_t *michael_key, u_int32_t *rx_pn);
810
811 uint8_t dp_get_peer_mac_addr_frm_id(struct cdp_soc_t *soc_handle,
812 uint16_t peer_id, uint8_t *peer_mac);
813
814 /**
815 * dp_peer_add_ast() - Allocate and add AST entry into peer list
816 * @soc: SoC handle
817 * @peer: peer to which ast node belongs
818 * @mac_addr: MAC address of ast node
819 * @type: AST entry type
820 * @flags: AST configuration flags
821 *
822 * This API is used by WDS source port learning function to
823 * add a new AST entry into peer AST list
824 *
825 * Return: QDF_STATUS code
826 */
827 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc, struct dp_peer *peer,
828 uint8_t *mac_addr, enum cdp_txrx_ast_entry_type type,
829 uint32_t flags);
830
831 /**
832 * dp_peer_del_ast() - Delete and free AST entry
833 * @soc: SoC handle
834 * @ast_entry: AST entry of the node
835 *
836 * This function removes the AST entry from peer and soc tables
837 * It assumes caller has taken the ast lock to protect the access to these
838 * tables
839 *
840 * Return: None
841 */
842 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry);
843
844 void dp_peer_ast_unmap_handler(struct dp_soc *soc,
845 struct dp_ast_entry *ast_entry);
846
847 /**
848  * dp_peer_update_ast() - Update AST entry to the roamed peer
849 * @soc: SoC handle
850 * @peer: peer to which ast node belongs
851 * @ast_entry: AST entry of the node
852 * @flags: wds or hmwds
853 *
854  * This function updates the AST entry in the peer and soc tables to the roamed peer.
855 * It assumes caller has taken the ast lock to protect the access to these
856 * tables
857 *
858 * Return: 0 if ast entry is updated successfully
859 * -1 failure
860 */
861 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
862 struct dp_ast_entry *ast_entry, uint32_t flags);
863
864 /**
865 * dp_peer_ast_hash_find_by_pdevid() - Find AST entry by MAC address
866 * @soc: SoC handle
867 * @ast_mac_addr: Mac address
868 * @pdev_id: pdev Id
869 *
870 * It assumes caller has taken the ast lock to protect the access to
871 * AST hash table
872 *
873 * Return: AST entry
874 */
875 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
876 uint8_t *ast_mac_addr,
877 uint8_t pdev_id);
878
879 /**
880 * dp_peer_ast_hash_find_by_vdevid() - Find AST entry by MAC address
881 * @soc: SoC handle
882 * @ast_mac_addr: Mac address
883 * @vdev_id: vdev Id
884 *
885 * It assumes caller has taken the ast lock to protect the access to
886 * AST hash table
887 *
888 * Return: AST entry
889 */
890 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
891 uint8_t *ast_mac_addr,
892 uint8_t vdev_id);
893
894 /**
895 * dp_peer_ast_hash_find_soc() - Find AST entry by MAC address
896 * @soc: SoC handle
897 * @ast_mac_addr: Mac address
898 *
899 * It assumes caller has taken the ast lock to protect the access to
900 * AST hash table
901 *
902 * Return: AST entry
903 */
904 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
905 uint8_t *ast_mac_addr);
906
907 /**
908 * dp_peer_ast_hash_find_soc_by_type() - Find AST entry by MAC address
909 * and AST type
910 * @soc: SoC handle
911 * @ast_mac_addr: Mac address
912 * @type: AST entry type
913 *
914 * It assumes caller has taken the ast lock to protect the access to
915 * AST hash table
916 *
917 * Return: AST entry
918 */
919 struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
920 struct dp_soc *soc,
921 uint8_t *ast_mac_addr,
922 enum cdp_txrx_ast_entry_type type);
923
924 /**
925 * dp_peer_ast_get_pdev_id() - get pdev_id from the ast entry
926 * @soc: SoC handle
927 * @ast_entry: AST entry of the node
928 *
929 * This function gets the pdev_id from the ast entry.
930 *
931 * Return: (uint8_t) pdev_id
932 */
933 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
934 struct dp_ast_entry *ast_entry);
935
936
937 /**
938 * dp_peer_ast_get_next_hop() - get next_hop from the ast entry
939 * @soc: SoC handle
940 * @ast_entry: AST entry of the node
941 *
942 * This function gets the next hop from the ast entry.
943 *
944 * Return: (uint8_t) next_hop
945 */
946 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
947 struct dp_ast_entry *ast_entry);
948
949 /**
950 * dp_peer_ast_set_type() - set type from the ast entry
951 * @soc: SoC handle
952 * @ast_entry: AST entry of the node
953 * @type: AST entry type
954 *
955 * This function sets the type in the ast entry.
956 *
957 * Return:
958 */
959 void dp_peer_ast_set_type(struct dp_soc *soc,
960 struct dp_ast_entry *ast_entry,
961 enum cdp_txrx_ast_entry_type type);
962
963 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
964 struct dp_ast_entry *ast_entry,
965 struct dp_peer *peer);
966
967 #ifdef WLAN_FEATURE_MULTI_AST_DEL
968 void dp_peer_ast_send_multi_wds_del(
969 struct dp_soc *soc, uint8_t vdev_id,
970 struct peer_del_multi_wds_entries *wds_list);
971 #endif
972
973 void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
974 struct cdp_soc *dp_soc,
975 void *cookie,
976 enum cdp_ast_free_status status);
977
978 /**
979 * dp_peer_ast_hash_remove() - Look up and remove AST entry from hash table
980 * @soc: SoC handle
981 * @ase: Address search entry
982 *
983 * This function removes the AST entry from soc AST hash table
984 * It assumes caller has taken the ast lock to protect the access to this table
985 *
986 * Return: None
987 */
988 void dp_peer_ast_hash_remove(struct dp_soc *soc,
989 struct dp_ast_entry *ase);
990
991 /**
992 * dp_peer_free_ast_entry() - Free up the ast entry memory
993 * @soc: SoC handle
994 * @ast_entry: Address search entry
995 *
996 * This API is used to free up the memory associated with
997 * AST entry.
998 *
999 * Return: None
1000 */
1001 void dp_peer_free_ast_entry(struct dp_soc *soc,
1002 struct dp_ast_entry *ast_entry);
1003
1004 /**
1005  * dp_peer_unlink_ast_entry() - Unlink the AST entry from peer and hash lists
1006 * @soc: SoC handle
1007 * @ast_entry: Address search entry
1008 * @peer: peer
1009 *
1010 * This API is used to remove/unlink AST entry from the peer list
1011 * and hash list.
1012 *
1013 * Return: None
1014 */
1015 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1016 struct dp_ast_entry *ast_entry,
1017 struct dp_peer *peer);
1018
1019 /**
1020 * dp_peer_mec_detach_entry() - Detach the MEC entry
1021 * @soc: SoC handle
1022 * @mecentry: MEC entry of the node
1023 * @ptr: pointer to free list
1024 *
1025 * The MEC entry is detached from MEC table and added to free_list
1026 * to free the object outside lock
1027 *
1028 * Return: None
1029 */
1030 void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
1031 void *ptr);
1032
1033 /**
1034 * dp_peer_mec_free_list() - free the MEC entry from free_list
1035 * @soc: SoC handle
1036 * @ptr: pointer to free list
1037 *
1038 * Return: None
1039 */
1040 void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr);
1041
1042 /**
1043  * dp_peer_mec_add_entry() - Allocate and add MEC entry into MEC table
1044 * @soc: SoC handle
1045 * @vdev: vdev to which mec node belongs
1046 * @mac_addr: MAC address of mec node
1047 *
1048 * This function allocates and adds MEC entry to MEC table.
1049 * It assumes caller has taken the mec lock to protect the access to these
1050 * tables
1051 *
1052 * Return: QDF_STATUS
1053 */
1054 QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
1055 struct dp_vdev *vdev,
1056 uint8_t *mac_addr);
1057
1058 /**
1059  * dp_peer_mec_hash_find_by_pdevid() - Find MEC entry by MAC address
1060 * within pdev
1061 * @soc: SoC handle
1062 * @pdev_id: pdev Id
1063 * @mec_mac_addr: MAC address of mec node
1064 *
1065 * It assumes caller has taken the mec_lock to protect the access to
1066 * MEC hash table
1067 *
1068 * Return: MEC entry
1069 */
1070 struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
1071 uint8_t pdev_id,
1072 uint8_t *mec_mac_addr);
1073
1074 #define DP_AST_ASSERT(_condition) \
1075 do { \
1076 if (!(_condition)) { \
1077 dp_print_ast_stats(soc);\
1078 QDF_BUG(_condition); \
1079 } \
1080 } while (0)
1081
1082 /**
1083 * dp_peer_update_inactive_time() - Update inactive time for peer
1084 * @pdev: pdev object
1085 * @tag_type: htt_tlv_tag type
1086 * @tag_buf: buf message
1087 */
1088 void
1089 dp_peer_update_inactive_time(struct dp_pdev *pdev, uint32_t tag_type,
1090 uint32_t *tag_buf);
1091
1092 #ifndef QCA_MULTIPASS_SUPPORT
1093 static inline
1094 /**
1095 * dp_peer_set_vlan_id() - set vlan_id for this peer
1096 * @cdp_soc: soc handle
1097 * @vdev_id: id of vdev object
1098 * @peer_mac: mac address
1099 * @vlan_id: vlan id for peer
1100 *
1101 * Return: void
1102 */
1103 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
1104 uint8_t vdev_id, uint8_t *peer_mac,
1105 uint16_t vlan_id)
1106 {
1107 }
1108
1109 /**
1110 * dp_set_vlan_groupkey() - set vlan map for vdev
1111 * @soc_hdl: pointer to soc
1112 * @vdev_id: id of vdev handle
1113 * @vlan_id: vlan_id
1114 * @group_key: group key for vlan
1115 *
1116 * Return: set success/failure
1117 */
1118 static inline
1119 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1120 uint16_t vlan_id, uint16_t group_key)
1121 {
1122 return QDF_STATUS_SUCCESS;
1123 }
1124
1125 /**
1126 * dp_peer_multipass_list_init() - initialize multipass peer list
1127 * @vdev: pointer to vdev
1128 *
1129 * Return: void
1130 */
1131 static inline
1132 void dp_peer_multipass_list_init(struct dp_vdev *vdev)
1133 {
1134 }
1135
1136 /**
1137 * dp_peer_multipass_list_remove() - remove peer from special peer list
1138 * @peer: peer handle
1139 *
1140 * Return: void
1141 */
1142 static inline
1143 void dp_peer_multipass_list_remove(struct dp_peer *peer)
1144 {
1145 }
1146 #else
1147 void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
1148 uint8_t vdev_id, uint8_t *peer_mac,
1149 uint16_t vlan_id);
1150 QDF_STATUS dp_set_vlan_groupkey(struct cdp_soc_t *soc, uint8_t vdev_id,
1151 uint16_t vlan_id, uint16_t group_key);
1152 void dp_peer_multipass_list_init(struct dp_vdev *vdev);
1153 void dp_peer_multipass_list_remove(struct dp_peer *peer);
1154 #endif
1155
1156
1157 #ifndef QCA_PEER_MULTIQ_SUPPORT
1158 /**
1159 * dp_peer_reset_flowq_map() - reset peer flowq map table
1160 * @peer: dp peer handle
1161 *
1162 * Return: none
1163 */
1164 static inline
1165 void dp_peer_reset_flowq_map(struct dp_peer *peer)
1166 {
1167 }
1168
1169 /**
1170 * dp_peer_ast_index_flow_queue_map_create() - create ast index flow queue map
1171 * @soc_hdl: generic soc handle
1172 * @is_wds: flag to indicate if peer is wds
1173 * @peer_id: peer_id from htt peer map message
1174 * @peer_mac_addr: mac address of the peer
1175 * @ast_info: ast flow override information from peer map
1176 *
1177 * Return: none
1178 */
1179 static inline
1180 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
1181 bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
1182 struct dp_ast_flow_override_info *ast_info)
1183 {
1184 }
1185 #else
1186 void dp_peer_reset_flowq_map(struct dp_peer *peer);
1187
1188 void dp_peer_ast_index_flow_queue_map_create(void *soc_hdl,
1189 bool is_wds, uint16_t peer_id, uint8_t *peer_mac_addr,
1190 struct dp_ast_flow_override_info *ast_info);
1191 #endif
1192
1193 #ifdef QCA_PEER_EXT_STATS
1194 /**
1195 * dp_peer_delay_stats_ctx_alloc() - Allocate peer delay stats content
1196 * @soc: DP SoC context
1197 * @txrx_peer: DP txrx peer context
1198 *
1199 * Allocate the peer delay stats context
1200 *
1201 * Return: QDF_STATUS_SUCCESS if allocation is
1202 * successful
1203 */
1204 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
1205 struct dp_txrx_peer *txrx_peer);
1206
1207 /**
1208 * dp_peer_delay_stats_ctx_dealloc() - Dealloc the peer delay stats context
1209 * @soc: DP SoC context
1210 * @txrx_peer: txrx DP peer context
1211 *
1212 * Free the peer delay stats context
1213 *
1214 * Return: Void
1215 */
1216 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
1217 struct dp_txrx_peer *txrx_peer);
1218
1219 /**
1220 * dp_peer_delay_stats_ctx_clr() - Clear delay stats context of peer
1221 * @txrx_peer: dp_txrx_peer handle
1222 *
1223 * Return: void
1224 */
1225 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
1226 #else
1227 static inline
1228 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
1229 struct dp_txrx_peer *txrx_peer)
1230 {
1231 return QDF_STATUS_SUCCESS;
1232 }
1233
1234 static inline
1235 void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
1236 struct dp_txrx_peer *txrx_peer)
1237 {
1238 }
1239
1240 static inline
1241 void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
1242 {
1243 }
1244 #endif
1245
1246 #ifdef WLAN_PEER_JITTER
1247 /**
1248 * dp_peer_jitter_stats_ctx_alloc() - Allocate jitter stats context for peer
1249 * @pdev: Datapath pdev handle
1250 * @txrx_peer: dp_txrx_peer handle
1251 *
1252 * Return: QDF_STATUS
1253 */
1254 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
1255 struct dp_txrx_peer *txrx_peer);
1256
1257 /**
1258 * dp_peer_jitter_stats_ctx_dealloc() - Deallocate jitter stats context
1259 * @pdev: Datapath pdev handle
1260 * @txrx_peer: dp_txrx_peer handle
1261 *
1262 * Return: void
1263 */
1264 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
1265 struct dp_txrx_peer *txrx_peer);
1266
1267 /**
1268 * dp_peer_jitter_stats_ctx_clr() - Clear jitter stats context of peer
1269 * @txrx_peer: dp_txrx_peer handle
1270 *
1271 * Return: void
1272 */
1273 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer);
1274 #else
1275 static inline
1276 QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
1277 struct dp_txrx_peer *txrx_peer)
1278 {
1279 return QDF_STATUS_SUCCESS;
1280 }
1281
1282 static inline
1283 void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
1284 struct dp_txrx_peer *txrx_peer)
1285 {
1286 }
1287
1288 static inline
1289 void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
1290 {
1291 }
1292 #endif
1293
1294 #ifndef CONFIG_SAWF_DEF_QUEUES
1295 static inline QDF_STATUS dp_peer_sawf_ctx_alloc(struct dp_soc *soc,
1296 struct dp_peer *peer)
1297 {
1298 return QDF_STATUS_SUCCESS;
1299 }
1300
1301 static inline QDF_STATUS dp_peer_sawf_ctx_free(struct dp_soc *soc,
1302 struct dp_peer *peer)
1303 {
1304 return QDF_STATUS_SUCCESS;
1305 }
1306
1307 #endif
1308
1309 #ifndef CONFIG_SAWF
1310 static inline
1311 QDF_STATUS dp_peer_sawf_stats_ctx_alloc(struct dp_soc *soc,
1312 struct dp_txrx_peer *txrx_peer)
1313 {
1314 return QDF_STATUS_SUCCESS;
1315 }
1316
1317 static inline
1318 QDF_STATUS dp_peer_sawf_stats_ctx_free(struct dp_soc *soc,
1319 struct dp_txrx_peer *txrx_peer)
1320 {
1321 return QDF_STATUS_SUCCESS;
1322 }
1323 #endif
1324
1325 /**
1326  * dp_vdev_bss_peer_ref_n_get() - Get bss peer of a vdev
1327 * @soc: DP soc
1328 * @vdev: vdev
1329 * @mod_id: id of module requesting reference
1330 *
1331 * Return: VDEV BSS peer
1332 */
1333 struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
1334 struct dp_vdev *vdev,
1335 enum dp_mod_id mod_id);
1336
1337 /**
1338  * dp_sta_vdev_self_peer_ref_n_get() - Get self peer of sta vdev
1339 * @soc: DP soc
1340 * @vdev: vdev
1341 * @mod_id: id of module requesting reference
1342 *
1343 * Return: VDEV self peer
1344 */
1345 struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
1346 struct dp_vdev *vdev,
1347 enum dp_mod_id mod_id);
1348
1349 void dp_peer_ast_table_detach(struct dp_soc *soc);
1350
1351 /**
1352 * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
1353 * @soc: soc handle
1354 *
1355 * Return: none
1356 */
1357 void dp_peer_find_map_detach(struct dp_soc *soc);
1358
1359 void dp_soc_wds_detach(struct dp_soc *soc);
1360 QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc);
1361
1362 /**
1363  * dp_find_peer_by_macaddr() - Find the peer from the mac address provided
1364 * @soc: soc handle
1365 * @mac_addr: MAC address to be used to find peer
1366 * @vdev_id: VDEV id
1367 * @mod_id: MODULE ID
1368 *
1369 * Return: struct dp_peer
1370 */
1371 struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
1372 uint8_t vdev_id, enum dp_mod_id mod_id);
1373 /**
1374 * dp_peer_ast_hash_attach() - Allocate and initialize AST Hash Table
1375 * @soc: SoC handle
1376 *
1377 * Return: QDF_STATUS
1378 */
1379 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc);
1380
1381 /**
1382 * dp_peer_mec_hash_attach() - Allocate and initialize MEC Hash Table
1383 * @soc: SoC handle
1384 *
1385 * Return: QDF_STATUS
1386 */
1387 QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc);
1388
1389 /**
1390 * dp_del_wds_entry_wrapper() - delete a WDS AST entry
1391 * @soc: DP soc structure pointer
1392 * @vdev_id: vdev_id
1393 * @wds_macaddr: MAC address of ast node
1394 * @type: type from enum cdp_txrx_ast_entry_type
1395 * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
1396 *
1397 * This API is used to delete an AST entry from fw
1398 *
1399 * Return: None
1400 */
1401 void dp_del_wds_entry_wrapper(struct dp_soc *soc, uint8_t vdev_id,
1402 uint8_t *wds_macaddr, uint8_t type,
1403 uint8_t delete_in_fw);
1404
1405 void dp_soc_wds_attach(struct dp_soc *soc);
1406
1407 /**
1408 * dp_peer_mec_hash_detach() - Free MEC Hash table
1409 * @soc: SoC handle
1410 *
1411 * Return: None
1412 */
1413 void dp_peer_mec_hash_detach(struct dp_soc *soc);
1414
1415 /**
1416 * dp_peer_ast_hash_detach() - Free AST Hash table
1417 * @soc: SoC handle
1418 *
1419 * Return: None
1420 */
1421 void dp_peer_ast_hash_detach(struct dp_soc *soc);
1422
1423 #ifdef FEATURE_AST
1424 /**
1425 * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
1426 * @soc: datapath soc handle
1427 * @peer: datapath peer handle
1428 *
1429 * Delete the AST entries belonging to a peer
1430 */
1431 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
1432 struct dp_peer *peer)
1433 {
1434 struct dp_ast_entry *ast_entry, *temp_ast_entry;
1435
1436 dp_peer_debug("peer: %pK, self_ast: %pK", peer, peer->self_ast_entry);
1437 /*
1438 	 * Delete the peer's self AST entry. This handles scenarios where the
1439 	 * peer is freed before the peer map is received (for example, when
1440 	 * auth is disallowed due to ACL); in such cases the self AST entry is
1441 	 * not added to peer->ast_list.
1442 */
1443 if (peer->self_ast_entry) {
1444 dp_peer_del_ast(soc, peer->self_ast_entry);
1445 peer->self_ast_entry = NULL;
1446 }
1447
1448 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
1449 dp_peer_del_ast(soc, ast_entry);
1450 }
1451
1452 /**
1453 * dp_print_peer_ast_entries() - Dump AST entries of peer
1454 * @soc: Datapath soc handle
1455 * @peer: Datapath peer
1456 * @arg: argument to iterate function
1457 *
1458 * Return: void
1459 */
1460 void dp_print_peer_ast_entries(struct dp_soc *soc, struct dp_peer *peer,
1461 void *arg);
1462 #else
1463 static inline void dp_print_peer_ast_entries(struct dp_soc *soc,
1464 struct dp_peer *peer, void *arg)
1465 {
1466 }
1467
1468 static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
1469 struct dp_peer *peer)
1470 {
1471 }
1472 #endif
1473
1474 #ifdef FEATURE_MEC
1475 /**
1476 * dp_peer_mec_spinlock_create() - Create the MEC spinlock
1477 * @soc: SoC handle
1478 *
1479 * Return: none
1480 */
1481 void dp_peer_mec_spinlock_create(struct dp_soc *soc);
1482
1483 /**
1484 * dp_peer_mec_spinlock_destroy() - Destroy the MEC spinlock
1485 * @soc: SoC handle
1486 *
1487 * Return: none
1488 */
1489 void dp_peer_mec_spinlock_destroy(struct dp_soc *soc);
1490
1491 /**
1492 * dp_peer_mec_flush_entries() - Delete all mec entries in table
1493 * @soc: Datapath SOC
1494 *
1495 * Return: None
1496 */
1497 void dp_peer_mec_flush_entries(struct dp_soc *soc);
1498 #else
1499 static inline void dp_peer_mec_spinlock_create(struct dp_soc *soc)
1500 {
1501 }
1502
1503 static inline void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
1504 {
1505 }
1506
1507 static inline void dp_peer_mec_flush_entries(struct dp_soc *soc)
1508 {
1509 }
1510 #endif
1511
1512 static inline int dp_peer_find_mac_addr_cmp(
1513 union dp_align_mac_addr *mac_addr1,
1514 union dp_align_mac_addr *mac_addr2)
1515 {
1516 /*
1517 * Intentionally use & rather than &&.
1518 * because the operands are binary rather than generic boolean,
1519 * the functionality is equivalent.
1520 * Using && has the advantage of short-circuited evaluation,
1521 * but using & has the advantage of no conditional branching,
1522 * which is a more significant benefit.
1523 */
1524 return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
1525 & (mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
1526 }
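/*
 * Note on the return convention (sketch): like memcmp(), the helper above
 * returns 0 when the two aligned MAC addresses match and non-zero
 * otherwise, so a match check reads:
 *
 *	if (!dp_peer_find_mac_addr_cmp(&peer->mac_addr, &mac_addr))
 *		... addresses are equal ...
 */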
1527
1528 /**
1529 * dp_peer_delete() - delete DP peer
1530 *
1531  * @soc: Datapath soc
1532 * @peer: Datapath peer
1533 * @arg: argument to iter function
1534 *
1535 * Return: void
1536 */
1537 void dp_peer_delete(struct dp_soc *soc,
1538 struct dp_peer *peer,
1539 void *arg);
1540
1541 /**
1542 * dp_mlo_peer_delete() - delete MLO DP peer
1543 *
1544 * @soc: Datapath soc
1545 * @peer: Datapath peer
1546 * @arg: argument to iter function
1547 *
1548 * Return: void
1549 */
1550 void dp_mlo_peer_delete(struct dp_soc *soc, struct dp_peer *peer, void *arg);
1551
1552 #ifdef WLAN_FEATURE_11BE_MLO
1553
1554 /* is MLO connection mld peer */
1555 #define IS_MLO_DP_MLD_TXRX_PEER(_peer) ((_peer)->mld_peer)
1556
1557 /* set peer type */
1558 #define DP_PEER_SET_TYPE(_peer, _type_val) \
1559 ((_peer)->peer_type = (_type_val))
1560
1561 /* is legacy peer */
1562 #define IS_DP_LEGACY_PEER(_peer) \
1563 ((_peer)->peer_type == CDP_LINK_PEER_TYPE && !((_peer)->mld_peer))
1564 /* is MLO connection link peer */
1565 #define IS_MLO_DP_LINK_PEER(_peer) \
1566 ((_peer)->peer_type == CDP_LINK_PEER_TYPE && (_peer)->mld_peer)
1567 /* is MLO connection mld peer */
1568 #define IS_MLO_DP_MLD_PEER(_peer) \
1569 ((_peer)->peer_type == CDP_MLD_PEER_TYPE)
1570 /* Get Mld peer from link peer */
1571 #define DP_GET_MLD_PEER_FROM_PEER(link_peer) \
1572 ((link_peer)->mld_peer)
1573
1574 #ifdef WLAN_MLO_MULTI_CHIP
1575 static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
1576 {
1577 if (soc->arch_ops.mlo_get_chip_id)
1578 return soc->arch_ops.mlo_get_chip_id(soc);
1579
1580 return 0;
1581 }
1582
1583 static inline struct dp_peer *
1584 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
1585 uint8_t *peer_mac_addr,
1586 int mac_addr_is_aligned,
1587 uint8_t vdev_id,
1588 uint8_t chip_id,
1589 enum dp_mod_id mod_id)
1590 {
1591 if (soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id)
1592 return soc->arch_ops.mlo_link_peer_find_hash_find_by_chip_id
1593 (soc, peer_mac_addr,
1594 mac_addr_is_aligned,
1595 vdev_id, chip_id,
1596 mod_id);
1597
1598 return NULL;
1599 }
1600 #else
1601 static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
1602 {
1603 return 0;
1604 }
1605
1606 static inline struct dp_peer *
1607 dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
1608 uint8_t *peer_mac_addr,
1609 int mac_addr_is_aligned,
1610 uint8_t vdev_id,
1611 uint8_t chip_id,
1612 enum dp_mod_id mod_id)
1613 {
1614 return dp_peer_find_hash_find(soc, peer_mac_addr,
1615 mac_addr_is_aligned,
1616 vdev_id, mod_id);
1617 }
1618 #endif
1619
1620 /**
1621 * dp_mld_peer_find_hash_find() - returns mld peer from mld peer_hash_table
1622 * matching mac_address
1623 * @soc: soc handle
1624 * @peer_mac_addr: mld peer mac address
1625 * @mac_addr_is_aligned: is mac addr aligned
1626 * @vdev_id: vdev_id
1627 * @mod_id: id of module requesting reference
1628 *
1629  * Return: peer on success
1630  *         NULL on failure
1631 */
1632 static inline
1633 struct dp_peer *dp_mld_peer_find_hash_find(struct dp_soc *soc,
1634 uint8_t *peer_mac_addr,
1635 int mac_addr_is_aligned,
1636 uint8_t vdev_id,
1637 enum dp_mod_id mod_id)
1638 {
1639 if (soc->arch_ops.mlo_peer_find_hash_find)
1640 return soc->arch_ops.mlo_peer_find_hash_find(soc,
1641 peer_mac_addr,
1642 mac_addr_is_aligned,
1643 mod_id, vdev_id);
1644 return NULL;
1645 }
1646
1647 /**
1648  * dp_peer_hash_find_wrapper() - find link peer or mld peer according to
1649 * peer_type
1650 * @soc: DP SOC handle
1651 * @peer_info: peer information for hash find
1652 * @mod_id: ID of module requesting reference
1653 *
1654 * Return: peer handle
1655 */
1656 static inline
1657 struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
1658 struct cdp_peer_info *peer_info,
1659 enum dp_mod_id mod_id)
1660 {
1661 struct dp_peer *peer = NULL;
1662
1663 if (peer_info->peer_type == CDP_LINK_PEER_TYPE ||
1664 peer_info->peer_type == CDP_WILD_PEER_TYPE) {
1665 peer = dp_peer_find_hash_find(soc, peer_info->mac_addr,
1666 peer_info->mac_addr_is_aligned,
1667 peer_info->vdev_id,
1668 mod_id);
1669 if (peer)
1670 return peer;
1671 }
1672 if (peer_info->peer_type == CDP_MLD_PEER_TYPE ||
1673 peer_info->peer_type == CDP_WILD_PEER_TYPE)
1674 peer = dp_mld_peer_find_hash_find(
1675 soc, peer_info->mac_addr,
1676 peer_info->mac_addr_is_aligned,
1677 peer_info->vdev_id,
1678 mod_id);
1679 return peer;
1680 }
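/*
 * Illustrative usage sketch (peer_mac and vdev_id are hypothetical caller
 * variables; the cdp_peer_info fields are the ones read by the wrapper
 * above): callers fill a cdp_peer_info and let the wrapper pick between
 * the link-peer hash and the MLD hash:
 *
 *	struct cdp_peer_info peer_info = {0};
 *	struct dp_peer *peer;
 *
 *	peer_info.vdev_id = vdev_id;
 *	peer_info.mac_addr = peer_mac;
 *	peer_info.mac_addr_is_aligned = false;
 *	peer_info.peer_type = CDP_WILD_PEER_TYPE;
 *
 *	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
 *	if (peer)
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 */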
1681
1682 /**
1683 * dp_link_peer_add_mld_peer() - add mld peer pointer to link peer,
1684 * increase mld peer ref_cnt
1685 * @link_peer: link peer pointer
1686 * @mld_peer: mld peer pointer
1687 *
1688 * Return: none
1689 */
1690 static inline
1691 void dp_link_peer_add_mld_peer(struct dp_peer *link_peer,
1692 struct dp_peer *mld_peer)
1693 {
1694 /* increase mld_peer ref_cnt */
1695 dp_peer_get_ref(NULL, mld_peer, DP_MOD_ID_CDP);
1696 link_peer->mld_peer = mld_peer;
1697 }
1698
1699 /**
1700 * dp_link_peer_del_mld_peer() - delete mld peer pointer from link peer,
1701 * decrease mld peer ref_cnt
1702 * @link_peer: link peer pointer
1703 *
1704 * Return: None
1705 */
1706 static inline
1707 void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
1708 {
1709 dp_peer_unref_delete(link_peer->mld_peer, DP_MOD_ID_CDP);
1710 link_peer->mld_peer = NULL;
1711 }
1712
1713 /**
1714 * dp_mld_peer_init_link_peers_info() - init link peers info in mld peer
1715 * @mld_peer: mld peer pointer
1716 *
1717 * Return: None
1718 */
1719 static inline
1720 void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
1721 {
1722 int i;
1723
1724 qdf_spinlock_create(&mld_peer->link_peers_info_lock);
1725 mld_peer->num_links = 0;
1726 for (i = 0; i < DP_MAX_MLO_LINKS; i++)
1727 mld_peer->link_peers[i].is_valid = false;
1728 }
1729
1730 /**
1731 * dp_mld_peer_deinit_link_peers_info() - Deinit link peers info in mld peer
1732 * @mld_peer: mld peer pointer
1733 *
1734 * Return: None
1735 */
1736 static inline
1737 void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
1738 {
1739 qdf_spinlock_destroy(&mld_peer->link_peers_info_lock);
1740 }
1741
1742 /**
1743 * dp_mld_peer_add_link_peer() - add link peer info to mld peer
1744 * @mld_peer: mld dp peer pointer
1745 * @link_peer: link dp peer pointer
1746 * @is_bridge_peer: flag to indicate if peer is bridge peer
1747 *
1748 * Return: None
1749 */
1750 static inline
1751 void dp_mld_peer_add_link_peer(struct dp_peer *mld_peer,
1752 struct dp_peer *link_peer,
1753 uint8_t is_bridge_peer)
1754 {
1755 int i;
1756 struct dp_peer_link_info *link_peer_info;
1757 struct dp_soc *soc = mld_peer->vdev->pdev->soc;
1758
1759 qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1760 for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1761 link_peer_info = &mld_peer->link_peers[i];
1762 if (!link_peer_info->is_valid) {
1763 qdf_mem_copy(link_peer_info->mac_addr.raw,
1764 link_peer->mac_addr.raw,
1765 QDF_MAC_ADDR_SIZE);
1766 link_peer_info->is_valid = true;
1767 link_peer_info->vdev_id = link_peer->vdev->vdev_id;
1768 link_peer_info->chip_id =
1769 dp_get_chip_id(link_peer->vdev->pdev->soc);
1770 link_peer_info->is_bridge_peer = is_bridge_peer;
1771 mld_peer->num_links++;
1772 break;
1773 }
1774 }
1775 qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1776
1777 dp_peer_info("%s addition of link peer %pK (" QDF_MAC_ADDR_FMT ") "
1778 "to MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
1779 "idx %u num_links %u",
1780 (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
1781 link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
1782 mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
1783 i, mld_peer->num_links);
1784
1785 dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_ADD_LINK,
1786 mld_peer, link_peer, i,
1787 (i != DP_MAX_MLO_LINKS) ? 1 : 0);
1788 }
1789
1790 /**
1791 * dp_mld_peer_del_link_peer() - Delete link peer info from MLD peer
1792 * @mld_peer: MLD dp peer pointer
1793 * @link_peer: link dp peer pointer
1794 *
1795 * Return: number of links left after deletion
1796 */
1797 static inline
1798 uint8_t dp_mld_peer_del_link_peer(struct dp_peer *mld_peer,
1799 struct dp_peer *link_peer)
1800 {
1801 int i;
1802 struct dp_peer_link_info *link_peer_info;
1803 uint8_t num_links;
1804 struct dp_soc *soc = mld_peer->vdev->pdev->soc;
1805
1806 qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
1807 for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
1808 link_peer_info = &mld_peer->link_peers[i];
1809 if (link_peer_info->is_valid &&
1810 !dp_peer_find_mac_addr_cmp(&link_peer->mac_addr,
1811 &link_peer_info->mac_addr)) {
1812 link_peer_info->is_valid = false;
1813 mld_peer->num_links--;
1814 break;
1815 }
1816 }
1817 num_links = mld_peer->num_links;
1818 qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);
1819
1820 dp_peer_info("%s deletion of link peer %pK (" QDF_MAC_ADDR_FMT ") "
1821 "from MLD peer %pK (" QDF_MAC_ADDR_FMT "), "
1822 "idx %u num_links %u",
1823 (i != DP_MAX_MLO_LINKS) ? "Successful" : "Failed",
1824 link_peer, QDF_MAC_ADDR_REF(link_peer->mac_addr.raw),
1825 mld_peer, QDF_MAC_ADDR_REF(mld_peer->mac_addr.raw),
1826 i, mld_peer->num_links);
1827
1828 dp_cfg_event_record_mlo_link_delink_evt(soc, DP_CFG_EVENT_MLO_DEL_LINK,
1829 mld_peer, link_peer, i,
1830 (i != DP_MAX_MLO_LINKS) ? 1 : 0);
1831
1832 return num_links;
1833 }

/**
 * dp_get_link_peers_ref_from_mld_peer() - get link peers pointer and
 *					   increase link peers ref_cnt
 * @soc: dp_soc handle
 * @mld_peer: dp mld peer pointer
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None
 */
static inline
void dp_get_link_peers_ref_from_mld_peer(
				struct dp_soc *soc,
				struct dp_peer *mld_peer,
				struct dp_mld_link_peers *mld_link_peers,
				enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i = 0, j = 0;
	struct dp_peer_link_info *link_peer_info;

	qdf_mem_zero(mld_link_peers, sizeof(*mld_link_peers));
	qdf_spin_lock_bh(&mld_peer->link_peers_info_lock);
	for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
		link_peer_info = &mld_peer->link_peers[i];
		if (link_peer_info->is_valid) {
			peer = dp_link_peer_hash_find_by_chip_id(
						soc,
						link_peer_info->mac_addr.raw,
						true,
						link_peer_info->vdev_id,
						link_peer_info->chip_id,
						mod_id);
			if (peer)
				mld_link_peers->link_peers[j++] = peer;
		}
	}
	qdf_spin_unlock_bh(&mld_peer->link_peers_info_lock);

	mld_link_peers->num_links = j;
}

/**
 * dp_release_link_peers_ref() - release all link peer references
 * @mld_link_peers: structure that holds the link peers pointer array and count
 * @mod_id: id of module requesting reference
 *
 * Return: None.
 */
static inline
void dp_release_link_peers_ref(
			struct dp_mld_link_peers *mld_link_peers,
			enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	uint8_t i;

	for (i = 0; i < mld_link_peers->num_links; i++) {
		peer = mld_link_peers->link_peers[i];
		if (peer)
			dp_peer_unref_delete(peer, mod_id);
		mld_link_peers->link_peers[i] = NULL;
	}

	mld_link_peers->num_links = 0;
}
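
/*
 * Illustrative usage sketch (not part of the driver API): the two helpers
 * above are normally used as a pair. A caller that needs to walk every
 * link peer of an MLD peer takes the references, iterates the returned
 * array and then drops the references, e.g.:
 *
 *	struct dp_mld_link_peers link_peers_info;
 *	struct dp_peer *link_peer;
 *	uint8_t i;
 *
 *	dp_get_link_peers_ref_from_mld_peer(soc, mld_peer, &link_peers_info,
 *					    DP_MOD_ID_CDP);
 *	for (i = 0; i < link_peers_info.num_links; i++) {
 *		link_peer = link_peers_info.link_peers[i];
 *		// operate on link_peer while the reference is held
 *	}
 *	dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 *
 * soc, mld_peer and DP_MOD_ID_CDP are placeholders here; callers pass
 * their own context and module id.
 */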

/**
 * dp_get_link_peer_id_by_lmac_id() - Get link peer id using peer id and lmac id
 * @soc: Datapath soc handle
 * @peer_id: peer id
 * @lmac_id: lmac id to find the link peer on given lmac
 *
 * Return: peer_id of link peer if found
 *	   else return HTT_INVALID_PEER
 */
static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	uint8_t i;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_soc *link_peer_soc;
	struct dp_mld_link_peers link_peers_info;
	uint16_t link_peer_id = HTT_INVALID_PEER;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_CDP);

	if (!peer)
		return HTT_INVALID_PEER;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    DP_MOD_ID_CDP);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			link_peer_soc = link_peer->vdev->pdev->soc;
			if ((link_peer_soc == soc) &&
			    (link_peer->vdev->pdev->lmac_id == lmac_id)) {
				link_peer_id = link_peer->peer_id;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
	} else {
		link_peer_id = peer_id;
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return link_peer_id;
}

/**
 * dp_peer_get_tgt_peer_hash_find() - get dp_peer handle
 * @soc: soc handle
 * @peer_mac: peer mac address
 * @mac_addr_is_aligned: is mac addr aligned
 * @vdev_id: vdev_id
 * @mod_id: id of module requesting reference
 *
 * For an MLO connection, return the corresponding MLD peer;
 * otherwise return the link peer (non-MLO case).
 *
 * Return: peer in success
 *	   NULL in failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_find_hash_find(soc,
						      peer_mac, 0, vdev_id,
						      mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release the peer reference added by hash find */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	} else {
		dp_peer_err("fail to find peer:" QDF_MAC_ADDR_FMT " vdev_id: %u",
			    QDF_MAC_ADDR_REF(peer_mac), vdev_id);
	}

	return ta_peer;
}
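
/*
 * Illustrative usage sketch (not part of the driver API): a caller that
 * only has the peer MAC address and vdev id can resolve the target peer
 * (MLD peer for MLO, the peer itself otherwise) and must drop the
 * reference when done:
 *
 *	struct dp_peer *tgt_peer;
 *
 *	tgt_peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0, vdev_id,
 *						  DP_MOD_ID_CDP);
 *	if (tgt_peer) {
 *		// use tgt_peer under the held reference
 *		dp_peer_unref_delete(tgt_peer, DP_MOD_ID_CDP);
 *	}
 *
 * soc, peer_mac and vdev_id are placeholders for the caller's context;
 * DP_MOD_ID_CDP stands in for the caller's own module id.
 */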

/**
 * dp_peer_get_tgt_peer_by_id() - Returns target peer object given the peer id
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @mod_id: ID of module requesting reference
 *
 * For an MLO connection, return the corresponding MLD peer;
 * otherwise return the link peer (non-MLO case).
 *
 * Return: peer in success
 *	   NULL in failure
 */
static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *ta_peer = NULL;
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (peer) {
		/* mlo connection link peer, get mld peer with reference */
		if (IS_MLO_DP_LINK_PEER(peer)) {
			/* increase mld peer ref_cnt */
			if (QDF_STATUS_SUCCESS ==
			    dp_peer_get_ref(soc, peer->mld_peer, mod_id))
				ta_peer = peer->mld_peer;
			else
				ta_peer = NULL;

			/* release the link peer reference taken above */
			dp_peer_unref_delete(peer, mod_id);
		} else {
			/* mlo MLD peer or non-mlo link peer */
			ta_peer = peer;
		}
	}

	return ta_peer;
}

/**
 * dp_peer_mlo_delete() - peer MLO related delete operation
 * @peer: DP peer handle
 *
 * Return: None
 */
static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
	struct dp_peer *ml_peer;
	struct dp_soc *soc;

	dp_info("peer " QDF_MAC_ADDR_FMT " type %d",
		QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer->peer_type);

	/* MLO connection link peer */
	if (IS_MLO_DP_LINK_PEER(peer)) {
		ml_peer = peer->mld_peer;
		soc = ml_peer->vdev->pdev->soc;

		/* if last link peer deletion, delete MLD peer */
		if (dp_mld_peer_del_link_peer(peer->mld_peer, peer) == 0)
			dp_peer_delete(soc, peer->mld_peer, NULL);
	}
}

/**
 * dp_peer_mlo_setup() - create MLD peer and MLO related initialization
 * @soc: Soc handle
 * @peer: DP peer handle
 * @vdev_id: Vdev ID
 * @setup_info: peer setup information for MLO
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info);

/**
 * dp_get_tgt_peer_from_peer() - Get target peer from the given peer
 * @peer: datapath peer
 *
 * Return: MLD peer in case of MLO Link peer
 *	   Peer itself in other cases
 */
static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ? peer->mld_peer : peer;
}

/**
 * dp_get_primary_link_peer_by_id() - Get primary link peer from the given
 *				      peer id
 * @soc: core DP soc context
 * @peer_id: peer id
 * @mod_id: ID of module requesting reference
 *
 * Return: primary link peer for the MLO peer
 *	   legacy peer itself in case of legacy peer
 */
static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	uint8_t i;
	struct dp_mld_link_peers link_peers_info;
	struct dp_peer *peer;
	struct dp_peer *link_peer;
	struct dp_peer *primary_peer = NULL;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);

	if (!peer)
		return NULL;

	if (IS_MLO_DP_MLD_PEER(peer)) {
		/* get link peers with reference */
		dp_get_link_peers_ref_from_mld_peer(soc, peer, &link_peers_info,
						    mod_id);

		for (i = 0; i < link_peers_info.num_links; i++) {
			link_peer = link_peers_info.link_peers[i];
			if (link_peer->primary_link) {
				/*
				 * Take additional reference over
				 * primary link peer.
				 */
				if (QDF_STATUS_SUCCESS ==
				    dp_peer_get_ref(NULL, link_peer, mod_id))
					primary_peer = link_peer;
				break;
			}
		}
		/* release link peers reference */
		dp_release_link_peers_ref(&link_peers_info, mod_id);
		dp_peer_unref_delete(peer, mod_id);
	} else {
		primary_peer = peer;
	}

	return primary_peer;
}
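
/*
 * Illustrative usage sketch (not part of the driver API): per-pdev state
 * for an MLO peer is typically derived from the primary link peer, so a
 * caller holding only a (possibly MLD) peer id would do:
 *
 *	struct dp_peer *primary_peer;
 *
 *	primary_peer = dp_get_primary_link_peer_by_id(soc, peer_id,
 *						      DP_MOD_ID_CDP);
 *	if (primary_peer) {
 *		// e.g. primary_peer->vdev->pdev identifies the home pdev
 *		dp_peer_unref_delete(primary_peer, DP_MOD_ID_CDP);
 *	}
 *
 * soc and peer_id are placeholders; DP_MOD_ID_CDP stands in for the
 * caller's module id.
 */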

/**
 * dp_get_txrx_peer() - Get dp_txrx_peer from passed dp_peer
 * @peer: Datapath peer
 *
 * Return: dp_txrx_peer from MLD peer if peer type is link peer
 *	   dp_txrx_peer from peer itself for other cases
 */
static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return IS_MLO_DP_LINK_PEER(peer) ?
			peer->mld_peer->txrx_peer : peer->txrx_peer;
}

/**
 * dp_peer_is_primary_link_peer() - Check if peer is primary link peer
 * @peer: Datapath peer
 *
 * Return: true if peer is primary link peer or legacy peer
 *	   false otherwise
 */
static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	if (IS_MLO_DP_LINK_PEER(peer) && peer->primary_link)
		return true;
	else if (IS_DP_LEGACY_PEER(peer))
		return true;
	else
		return false;
}
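
/*
 * Illustrative usage sketch (not part of the driver API): bookkeeping
 * that must run exactly once per MLO peer is commonly gated on the
 * primary link, e.g.:
 *
 *	if (dp_peer_is_primary_link_peer(peer)) {
 *		// update per-MLD accounting once, from the primary link
 *		// (or legacy) peer only
 *	}
 *
 * peer is a placeholder for whatever dp_peer the caller already holds.
 */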

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for the given peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	struct dp_peer *peer;
	struct dp_txrx_peer *txrx_peer;

	peer = dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	if (!peer)
		return NULL;

	txrx_peer = dp_get_txrx_peer(peer);
	if (txrx_peer) {
		*handle = (dp_txrx_ref_handle)peer;
		return txrx_peer;
	}

	dp_peer_unref_delete(peer, mod_id);
	return NULL;
}
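
/*
 * Illustrative usage sketch (not part of the driver API): the returned
 * txrx peer is pinned through the opaque handle, which the caller hands
 * back when done (dp_txrx_peer_unref_delete() is assumed here to be the
 * usual release helper for that handle):
 *
 *	dp_txrx_ref_handle ref_handle = NULL;
 *	struct dp_txrx_peer *txrx_peer;
 *
 *	txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc, peer_id,
 *						   &ref_handle,
 *						   DP_MOD_ID_CDP);
 *	if (txrx_peer) {
 *		// read/update txrx_peer stats while the reference is held
 *		dp_txrx_peer_unref_delete(ref_handle, DP_MOD_ID_CDP);
 *	}
 */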

/**
 * dp_print_mlo_ast_stats_be() - Print AST stats for MLO peers
 *
 * @soc: core DP soc context
 *
 * Return: void
 */
void dp_print_mlo_ast_stats_be(struct dp_soc *soc);

/**
 * dp_get_peer_link_id() - Get Link peer Link ID
 * @peer: Datapath peer
 *
 * Return: Link peer Link ID
 */
uint8_t dp_get_peer_link_id(struct dp_peer *peer);
#else

#define IS_MLO_DP_MLD_TXRX_PEER(_peer) false

#define DP_PEER_SET_TYPE(_peer, _type_val) /* no op */
/* is legacy peer */
#define IS_DP_LEGACY_PEER(_peer) true
#define IS_MLO_DP_LINK_PEER(_peer) false
#define IS_MLO_DP_MLD_PEER(_peer) false
#define DP_GET_MLD_PEER_FROM_PEER(link_peer) NULL

static inline
struct dp_peer *dp_peer_hash_find_wrapper(struct dp_soc *soc,
					  struct cdp_peer_info *peer_info,
					  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_info->mac_addr,
				      peer_info->mac_addr_is_aligned,
				      peer_info->vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_hash_find(struct dp_soc *soc,
					       uint8_t *peer_mac,
					       int mac_addr_is_aligned,
					       uint8_t vdev_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac,
				      mac_addr_is_aligned, vdev_id,
				      mod_id);
}

static inline
struct dp_peer *dp_peer_get_tgt_peer_by_id(struct dp_soc *soc,
					   uint16_t peer_id,
					   enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
QDF_STATUS dp_peer_mlo_setup(
			struct dp_soc *soc,
			struct dp_peer *peer,
			uint8_t vdev_id,
			struct cdp_peer_setup_info *setup_info)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void dp_mld_peer_init_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_mld_peer_deinit_link_peers_info(struct dp_peer *mld_peer)
{
}

static inline
void dp_link_peer_del_mld_peer(struct dp_peer *link_peer)
{
}

static inline
void dp_peer_mlo_delete(struct dp_peer *peer)
{
}

static inline
void dp_mlo_peer_authorize(struct dp_soc *soc,
			   struct dp_peer *link_peer)
{
}

static inline uint8_t dp_get_chip_id(struct dp_soc *soc)
{
	return 0;
}

static inline struct dp_peer *
dp_link_peer_hash_find_by_chip_id(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  uint8_t vdev_id,
				  uint8_t chip_id,
				  enum dp_mod_id mod_id)
{
	return dp_peer_find_hash_find(soc, peer_mac_addr,
				      mac_addr_is_aligned,
				      vdev_id, mod_id);
}

static inline
struct dp_peer *dp_get_tgt_peer_from_peer(struct dp_peer *peer)
{
	return peer;
}

static inline
struct dp_peer *dp_get_primary_link_peer_by_id(struct dp_soc *soc,
					       uint16_t peer_id,
					       enum dp_mod_id mod_id)
{
	return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
}

static inline
struct dp_txrx_peer *dp_get_txrx_peer(struct dp_peer *peer)
{
	return peer->txrx_peer;
}

static inline
bool dp_peer_is_primary_link_peer(struct dp_peer *peer)
{
	return true;
}

/**
 * dp_tgt_txrx_peer_get_ref_by_id() - Gets tgt txrx peer for the given peer id
 *
 * @soc: core DP soc context
 * @peer_id: peer id from which the peer object can be retrieved
 * @handle: reference handle
 * @mod_id: ID of module requesting reference
 *
 * Return: struct dp_txrx_peer*: Pointer to txrx DP peer object
 */
static inline struct dp_txrx_peer *
dp_tgt_txrx_peer_get_ref_by_id(struct dp_soc *soc,
			       uint16_t peer_id,
			       dp_txrx_ref_handle *handle,
			       enum dp_mod_id mod_id)
{
	return dp_txrx_peer_get_ref_by_id(soc, peer_id, handle, mod_id);
}

static inline
uint16_t dp_get_link_peer_id_by_lmac_id(struct dp_soc *soc, uint16_t peer_id,
					uint8_t lmac_id)
{
	return peer_id;
}

static inline void dp_print_mlo_ast_stats_be(struct dp_soc *soc)
{
}

static inline uint8_t dp_get_peer_link_id(struct dp_peer *peer)
{
	return 0;
}
#endif /* WLAN_FEATURE_11BE_MLO */

static inline
void dp_peer_defrag_rx_tids_init(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	qdf_mem_zero(&txrx_peer->rx_tid, DP_MAX_TIDS *
		     sizeof(struct dp_rx_tid_defrag));

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_create(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

static inline
void dp_peer_defrag_rx_tids_deinit(struct dp_txrx_peer *txrx_peer)
{
	uint8_t i;

	for (i = 0; i < DP_MAX_TIDS; i++)
		qdf_spinlock_destroy(&txrx_peer->rx_tid[i].defrag_tid_lock);
}

#ifdef PEER_CACHE_RX_PKTS
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
	qdf_spinlock_create(&txrx_peer->bufq_info.bufq_lock);
	txrx_peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
	qdf_list_create(&txrx_peer->bufq_info.cached_bufq,
			DP_RX_CACHED_BUFQ_THRESH);
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
	qdf_list_destroy(&txrx_peer->bufq_info.cached_bufq);
	qdf_spinlock_destroy(&txrx_peer->bufq_info.bufq_lock);
}

#else
static inline
void dp_peer_rx_bufq_resources_init(struct dp_txrx_peer *txrx_peer)
{
}

static inline
void dp_peer_rx_bufq_resources_deinit(struct dp_txrx_peer *txrx_peer)
{
}
#endif

/**
 * dp_peer_update_state() - update dp peer state
 *
 * @soc: core DP soc context
 * @peer: DP peer
 * @state: new state
 *
 * Return: None
 */
static inline void
dp_peer_update_state(struct dp_soc *soc,
		     struct dp_peer *peer,
		     enum dp_peer_state state)
{
	uint8_t peer_state;

	qdf_spin_lock_bh(&peer->peer_state_lock);
	peer_state = peer->peer_state;

	switch (state) {
	case DP_PEER_STATE_INIT:
		DP_PEER_STATE_ASSERT
			(peer, state, (peer_state != DP_PEER_STATE_ACTIVE) &&
			(peer_state != DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_ACTIVE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_LOGICAL_DELETE:
		DP_PEER_STATE_ASSERT(peer, state,
				     (peer_state == DP_PEER_STATE_ACTIVE) ||
				     (peer_state == DP_PEER_STATE_INIT));
		break;

	case DP_PEER_STATE_INACTIVE:
		if (IS_MLO_DP_MLD_PEER(peer))
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_ACTIVE));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	case DP_PEER_STATE_FREED:
		if (peer->sta_self_peer)
			DP_PEER_STATE_ASSERT
				(peer, state, (peer_state == DP_PEER_STATE_INIT));
		else
			DP_PEER_STATE_ASSERT
				(peer, state,
				 (peer_state == DP_PEER_STATE_INACTIVE) ||
				 (peer_state == DP_PEER_STATE_LOGICAL_DELETE));
		break;

	default:
		qdf_spin_unlock_bh(&peer->peer_state_lock);
		dp_alert("Invalid peer state %u for peer " QDF_MAC_ADDR_FMT,
			 state, QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		return;
	}
	peer->peer_state = state;
	qdf_spin_unlock_bh(&peer->peer_state_lock);
	dp_info("Updating peer state from %u to %u mac " QDF_MAC_ADDR_FMT "\n",
		peer_state, state,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));
}
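
/*
 * Illustrative summary (derived from the assertions above, not an
 * additional API): the forward state transitions accepted by
 * dp_peer_update_state() are
 *
 *	INIT           -> ACTIVE, LOGICAL_DELETE, FREED (sta self peer only)
 *	ACTIVE         -> LOGICAL_DELETE, INACTIVE (MLD peer only)
 *	LOGICAL_DELETE -> INACTIVE (link/legacy peer), FREED
 *	INACTIVE       -> FREED
 *
 * so a typical non-MLD peer lifetime looks like:
 *
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_INIT);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
 *	...
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_LOGICAL_DELETE);
 *	dp_peer_update_state(soc, peer, DP_PEER_STATE_FREED);
 *
 * soc and peer here are placeholders for the caller's context.
 */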

/**
 * dp_vdev_iterate_specific_peer_type() - API to iterate through vdev peer
 *					  list based on type of peer
 *					  (legacy or MLO link peer)
 *
 * @vdev: DP vdev context
 * @func: function to be called for each peer
 * @arg: argument to be passed to func
 * @mod_id: module_id
 * @peer_type: type of peer - MLO Link Peer or Legacy Peer
 *
 * Return: void
 */
static inline void
dp_vdev_iterate_specific_peer_type(struct dp_vdev *vdev,
				   dp_peer_iter_func *func,
				   void *arg, enum dp_mod_id mod_id,
				   enum dp_peer_type peer_type)
{
	struct dp_peer *peer;
	struct dp_peer *tmp_peer;
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev || !vdev->pdev->soc)
		return;

	soc = vdev->pdev->soc;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list,
			   peer_list_elem,
			   tmp_peer) {
		if (dp_peer_get_ref(soc, peer, mod_id) ==
		    QDF_STATUS_SUCCESS) {
			if ((peer_type == DP_PEER_TYPE_LEGACY &&
			     (IS_DP_LEGACY_PEER(peer))) ||
			    (peer_type == DP_PEER_TYPE_MLO_LINK &&
			     (IS_MLO_DP_LINK_PEER(peer)))) {
				(*func)(soc, peer, arg);
			}
			dp_peer_unref_delete(peer, mod_id);
		}
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}
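
/*
 * Illustrative usage sketch (not part of the driver API): the iterator
 * takes and drops a reference around each callback, so the callback only
 * has to match dp_peer_iter_func. A hypothetical caller counting legacy
 * peers on a vdev might look like:
 *
 *	static void dp_count_peer_cb(struct dp_soc *soc, struct dp_peer *peer,
 *				     void *arg)
 *	{
 *		uint32_t *count = (uint32_t *)arg;
 *
 *		(*count)++;
 *	}
 *
 *	uint32_t num_legacy_peers = 0;
 *
 *	dp_vdev_iterate_specific_peer_type(vdev, dp_count_peer_cb,
 *					   &num_legacy_peers, DP_MOD_ID_CDP,
 *					   DP_PEER_TYPE_LEGACY);
 *
 * dp_count_peer_cb and num_legacy_peers are made-up names for the sketch;
 * vdev and DP_MOD_ID_CDP stand in for the caller's own context.
 */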

#ifdef REO_SHARED_QREF_TABLE_EN
void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
					struct dp_peer *peer);
#else
static inline void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
						      struct dp_peer *peer) {}
#endif

/**
 * dp_peer_check_wds_ext_peer() - Check WDS ext peer
 *
 * @peer: DP peer
 *
 * Return: True for WDS ext peer, false otherwise
 */
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer);

/**
 * dp_gen_ml_peer_id() - Generate MLD peer id for DP
 *
 * @soc: DP soc context
 * @peer_id: mld peer id
 *
 * Return: DP MLD peer id
 */
uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id);

#ifdef FEATURE_AST
/**
 * dp_peer_host_add_map_ast() - Add ast entry with HW AST Index
 * @soc: SoC handle
 * @peer_id: peer id from firmware
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id of the VAP to which the peer belongs
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds);
#endif

#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
/**
 * dp_map_link_id_band() - Set link id to band mapping in txrx_peer
 * @peer: dp peer pointer
 *
 * Return: None
 */
void dp_map_link_id_band(struct dp_peer *peer);
#else
static inline
void dp_map_link_id_band(struct dp_peer *peer)
{
}
#endif
#endif /* _DP_PEER_H_ */