/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <qdf_types.h>
#include <qdf_lock.h>
#include <hal_hw_headers.h>
#include "dp_htt.h"
#include "dp_types.h"
#include "dp_internal.h"
#include "dp_peer.h"
#include "dp_rx_defrag.h"
#include "dp_rx.h"
#include <hal_api.h>
#include <hal_reo.h>
#include <cdp_txrx_handle.h>
#include <wlan_cfg.h>
#ifdef WIFI_MONITOR_SUPPORT
#include <dp_mon.h>
#endif
#ifdef FEATURE_WDS
#include "dp_txrx_wds.h"
#endif
#include <qdf_module.h>
#ifdef QCA_PEER_EXT_STATS
#include "dp_hist.h"
#endif
#ifdef BYPASS_OL_OPS
#include <target_if_dp.h>
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
#include "reg_services_common.h"
#endif
#ifdef FEATURE_AST
#ifdef BYPASS_OL_OPS
/**
 * dp_add_wds_entry_wrapper() - Add a new AST entry for the WDS station
 * @soc: DP soc structure pointer
 * @peer: dp peer structure
 * @dest_macaddr: MAC address of ast node
 * @flags: wds or hmwds
 * @type: type from enum cdp_txrx_ast_entry_type
 *
 * This API is used by the WDS source port learning function to
 * add a new AST entry in the FW.
 *
 * Return: 0 on success, error code otherwise.
 */
static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
				    struct dp_peer *peer,
				    const uint8_t *dest_macaddr,
				    uint32_t flags,
				    uint8_t type)
{
	QDF_STATUS status;

	status = target_if_add_wds_entry(soc->ctrl_psoc,
					 peer->vdev->vdev_id,
					 peer->mac_addr.raw,
					 dest_macaddr,
					 WMI_HOST_WDS_FLAG_STATIC,
					 type);

	return qdf_status_to_os_return(status);
}

/**
 * dp_update_wds_entry_wrapper() - update an existing wds entry with new peer
 * @soc: DP soc structure pointer
 * @peer: dp peer structure
 * @dest_macaddr: MAC address of ast node
 * @flags: wds or hmwds
 *
 * This API is used to update the peer MAC address for the AST
 * entry in the FW.
 *
 * Return: 0 on success, error code otherwise.
 */
static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
				       struct dp_peer *peer,
				       uint8_t *dest_macaddr,
				       uint32_t flags)
{
	QDF_STATUS status;

	status = target_if_update_wds_entry(soc->ctrl_psoc,
					    peer->vdev->vdev_id,
					    dest_macaddr,
					    peer->mac_addr.raw,
					    WMI_HOST_WDS_FLAG_STATIC);

	return qdf_status_to_os_return(status);
}

/**
 * dp_del_wds_entry_wrapper() - delete a WDS AST entry
 * @soc: DP soc structure pointer
 * @vdev_id: vdev_id
 * @wds_macaddr: MAC address of ast node
 * @type: type from enum cdp_txrx_ast_entry_type
 * @delete_in_fw: Flag to indicate if entry needs to be deleted in fw
 *
 * This API is used to delete an AST entry from the FW.
 *
 * Return: None
 */
void dp_del_wds_entry_wrapper(struct dp_soc *soc,
			      uint8_t vdev_id,
			      uint8_t *wds_macaddr,
			      uint8_t type,
			      uint8_t delete_in_fw)
{
	target_if_del_wds_entry(soc->ctrl_psoc, vdev_id,
				wds_macaddr, type, delete_in_fw);
}
#else
static int dp_add_wds_entry_wrapper(struct dp_soc *soc,
				    struct dp_peer *peer,
				    const uint8_t *dest_macaddr,
				    uint32_t flags,
				    uint8_t type)
{
	int status;

	status = soc->cdp_soc.ol_ops->peer_add_wds_entry(
					soc->ctrl_psoc,
					peer->vdev->vdev_id,
					peer->mac_addr.raw,
					peer->peer_id,
					dest_macaddr,
					peer->mac_addr.raw,
					flags,
					type);

	return status;
}

static int dp_update_wds_entry_wrapper(struct dp_soc *soc,
				       struct dp_peer *peer,
				       uint8_t *dest_macaddr,
				       uint32_t flags)
{
	int status;

	status = soc->cdp_soc.ol_ops->peer_update_wds_entry(
				soc->ctrl_psoc,
				peer->vdev->vdev_id,
				dest_macaddr,
				peer->mac_addr.raw,
				flags);

	return status;
}

void dp_del_wds_entry_wrapper(struct dp_soc *soc,
			      uint8_t vdev_id,
			      uint8_t *wds_macaddr,
			      uint8_t type,
			      uint8_t delete_in_fw)
{
	soc->cdp_soc.ol_ops->peer_del_wds_entry(soc->ctrl_psoc,
						vdev_id,
						wds_macaddr,
						type,
						delete_in_fw);
}
#endif /* BYPASS_OL_OPS */
#else
void dp_del_wds_entry_wrapper(struct dp_soc *soc,
			      uint8_t vdev_id,
			      uint8_t *wds_macaddr,
			      uint8_t type,
			      uint8_t delete_in_fw)
{
}
#endif /* FEATURE_AST */
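
/*
 * Illustrative sketch (not part of the driver): a WDS source port
 * learning caller would invoke the add wrapper roughly as below. The
 * variable names ta_peer, wds_src_mac and wds_flags are hypothetical
 * placeholders for the caller's context.
 *
 *	int ret = dp_add_wds_entry_wrapper(soc, ta_peer, wds_src_mac,
 *					   wds_flags,
 *					   CDP_TXRX_AST_TYPE_WDS);
 *	if (ret)
 *		dp_peer_err("WDS AST add failed: %d", ret);
 */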

#ifdef FEATURE_WDS
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	/* If peer map v2 is enabled we are not freeing the AST entry
	 * here; it is supposed to be freed in the unmap event (after
	 * we receive the delete confirmation from target).
	 *
	 * If peer_id is invalid we did not get the peer map event
	 * for the peer; free the AST entry from here only in this case.
	 */

	if ((ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC) &&
	    (ast_entry->type != CDP_TXRX_AST_TYPE_SELF))
		return true;

	return false;
}
#else
static inline bool
dp_peer_ast_free_in_unmap_supported(struct dp_soc *soc,
				    struct dp_ast_entry *ast_entry)
{
	return false;
}

void dp_soc_wds_attach(struct dp_soc *soc)
{
}

void dp_soc_wds_detach(struct dp_soc *soc)
{
}
#endif

#ifdef QCA_SUPPORT_WDS_EXTENDED
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_txrx_peer *txrx_peer;

	if (!vdev->wds_ext_enabled)
		return false;

	txrx_peer = dp_get_txrx_peer(peer);
	if (!txrx_peer)
		return false;

	if (qdf_atomic_test_bit(WDS_EXT_PEER_INIT_BIT,
				&txrx_peer->wds_ext.init))
		return true;

	return false;
}
#else
bool dp_peer_check_wds_ext_peer(struct dp_peer *peer)
{
	return false;
}
#endif

QDF_STATUS dp_peer_ast_table_attach(struct dp_soc *soc)
{
	uint32_t max_ast_index;

	max_ast_index = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);
	/* allocate ast_table for ast entry to ast_index map */
	dp_peer_info("\n%pK:<=== cfg max ast idx %d ====>", soc, max_ast_index);
	soc->ast_table = qdf_mem_malloc(max_ast_index *
					sizeof(struct dp_ast_entry *));
	if (!soc->ast_table) {
		dp_peer_err("%pK: ast_table memory allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;	/* success */
}

/**
 * dp_find_peer_by_macaddr() - Find the peer matching the MAC address provided.
 * @soc: soc handle
 * @mac_addr: MAC address to be used to find peer
 * @vdev_id: VDEV id
 * @mod_id: MODULE ID
 *
 * Return: struct dp_peer pointer on success, NULL otherwise
 */
struct dp_peer *dp_find_peer_by_macaddr(struct dp_soc *soc, uint8_t *mac_addr,
					uint8_t vdev_id, enum dp_mod_id mod_id)
{
	bool ast_ind_disable = wlan_cfg_get_ast_indication_disable(
							soc->wlan_cfg_ctx);
	struct cdp_peer_info peer_info = {0};

	if ((!soc->ast_offload_support) || (!ast_ind_disable)) {
		struct dp_ast_entry *ast_entry = NULL;
		uint16_t peer_id;

		qdf_spin_lock_bh(&soc->ast_lock);

		if (vdev_id == DP_VDEV_ALL)
			ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
		else
			ast_entry = dp_peer_ast_hash_find_by_vdevid
						(soc, mac_addr, vdev_id);

		if (!ast_entry) {
			qdf_spin_unlock_bh(&soc->ast_lock);
			dp_err("NULL ast entry");
			return NULL;
		}

		peer_id = ast_entry->peer_id;
		qdf_spin_unlock_bh(&soc->ast_lock);

		if (peer_id == HTT_INVALID_PEER)
			return NULL;

		return dp_peer_get_ref_by_id(soc, peer_id, mod_id);
	}

	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, mac_addr, false,
				 CDP_WILD_PEER_TYPE);
	return dp_peer_hash_find_wrapper(soc, &peer_info, mod_id);
}
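
/*
 * Illustrative usage sketch (not part of the driver): on success the
 * returned peer holds a reference taken for @mod_id, so the caller
 * must release it with dp_peer_unref_delete() using the same module
 * ID.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_find_peer_by_macaddr(soc, mac, DP_VDEV_ALL,
 *				       DP_MOD_ID_CDP);
 *	if (peer) {
 *		... use peer under its reference ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
 *	}
 */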

/**
 * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
 * @soc: soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_find_map_attach(struct dp_soc *soc)
{
	uint32_t max_peers, peer_map_size;

	max_peers = soc->max_peer_id;
	/* allocate the peer ID -> peer object map */
	dp_peer_info("\n%pK:<=== cfg max peer id %d ====>", soc, max_peers);
	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!soc->peer_id_to_obj_map) {
		dp_peer_err("%pK: peer map memory allocation failed", soc);
		return QDF_STATUS_E_NOMEM;
	}

	/*
	 * The peer_id_to_obj_map doesn't really need to be initialized,
	 * since elements are only used after they have been individually
	 * initialized.
	 * However, it is convenient for debugging to have all elements
	 * that are not in use set to 0.
	 */
	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);

	qdf_spinlock_create(&soc->peer_map_lock);
	return QDF_STATUS_SUCCESS;	/* success */
}

#define DP_AST_HASH_LOAD_MULT	2
#define DP_AST_HASH_LOAD_SHIFT	0

static inline uint32_t
dp_peer_find_hash_index(struct dp_soc *soc,
			union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;

	index ^= index >> soc->peer_hash.idx_bits;
	index &= soc->peer_hash.mask;
	return index;
}
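
/*
 * Worked example (illustrative only; assumes the align2 fields pack
 * the MAC bytes as little-endian 16-bit words): for MAC
 * aa:bb:cc:dd:ee:ff with idx_bits = 8 and mask = 0xff,
 *
 *	index  = 0xbbaa ^ 0xddcc ^ 0xffee;	index == 0x9988
 *	index ^= index >> 8;			index == 0x9911
 *	index &= 0xff;				index == 0x11
 *
 * so this peer hashes to bin 0x11.
 */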

struct dp_peer *dp_peer_find_hash_find(
				struct dp_soc *soc, uint8_t *peer_mac_addr,
				int mac_addr_is_aligned, uint8_t vdev_id,
				enum dp_mod_id mod_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_peer *peer;

	if (!soc->peer_hash.bins)
		return NULL;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    ((peer->vdev->vdev_id == vdev_id) ||
		     (vdev_id == DP_VDEV_ALL))) {
			/* take peer reference before returning */
			if (dp_peer_get_ref(soc, peer, mod_id) !=
						QDF_STATUS_SUCCESS)
				peer = NULL;

			qdf_spin_unlock_bh(&soc->peer_hash_lock);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
	return NULL;	/* failure */
}

qdf_export_symbol(dp_peer_find_hash_find);
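
/*
 * Illustrative sketch (not part of the driver): a hit returns the
 * peer with a reference held for @mod_id, so every successful lookup
 * must be paired with an unref. Passing 0 as the third argument
 * indicates the MAC buffer is not guaranteed to be aligned.
 *
 *	struct dp_peer *peer;
 *
 *	peer = dp_peer_find_hash_find(soc, mac, 0, vdev_id,
 *				      DP_MOD_ID_AST);
 *	if (peer) {
 *		... use peer under its reference ...
 *		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
 *	}
 */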

#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
 * @soc: soc handle
 *
 * Return: None
 */
static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	if (soc->peer_hash.bins) {
		qdf_mem_free(soc->peer_hash.bins);
		soc->peer_hash.bins = NULL;
		qdf_spinlock_destroy(&soc->peer_hash_lock);
	}

	if (soc->arch_ops.mlo_peer_find_hash_detach)
		soc->arch_ops.mlo_peer_find_hash_detach(soc);
}

/**
 * dp_peer_find_hash_attach() - allocate memory for peer_hash table
 * @soc: soc handle
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	qdf_spinlock_create(&soc->peer_hash_lock);

	if (soc->arch_ops.mlo_peer_find_hash_attach &&
	    (soc->arch_ops.mlo_peer_find_hash_attach(soc) !=
	     QDF_STATUS_SUCCESS)) {
		dp_peer_find_hash_detach(soc);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
		qdf_spin_lock_bh(&soc->peer_hash_lock);

		if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer,
							DP_MOD_ID_CONFIG))) {
			dp_err("fail to get peer ref:" QDF_MAC_ADDR_FMT,
			       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
			qdf_spin_unlock_bh(&soc->peer_hash_lock);
			return;
		}

		/*
		 * It is important to add the new peer at the tail of
		 * peer list with the bin index. Together with having
		 * the hash_find function search from head to tail,
		 * this ensures that if two entries with the same MAC address
		 * are stored, the one added first will be found first.
		 */
		TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer,
				  hash_list_elem);

		qdf_spin_unlock_bh(&soc->peer_hash_lock);
	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
		if (soc->arch_ops.mlo_peer_find_hash_add)
			soc->arch_ops.mlo_peer_find_hash_add(soc, peer);
	} else {
		dp_err("unknown peer type %d", peer->peer_type);
	}
}

void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);

	if (peer->peer_type == CDP_LINK_PEER_TYPE) {
		/* Check if tail is not empty before delete */
		QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));

		qdf_spin_lock_bh(&soc->peer_hash_lock);
		TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index],
			      hash_list_elem) {
			if (tmppeer == peer) {
				found = 1;
				break;
			}
		}
		QDF_ASSERT(found);
		TAILQ_REMOVE(&soc->peer_hash.bins[index], peer,
			     hash_list_elem);

		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		qdf_spin_unlock_bh(&soc->peer_hash_lock);
	} else if (peer->peer_type == CDP_MLD_PEER_TYPE) {
		if (soc->arch_ops.mlo_peer_find_hash_remove)
			soc->arch_ops.mlo_peer_find_hash_remove(soc, peer);
	} else {
		dp_err("unknown peer type %d", peer->peer_type);
	}
}

uint8_t dp_get_peer_link_id(struct dp_peer *peer)
{
	uint8_t link_id;

	link_id = IS_MLO_DP_LINK_PEER(peer) ? peer->link_id + 1 : 0;
	if (link_id < 1 || link_id > DP_MAX_MLO_LINKS)
		link_id = 0;

	return link_id;
}
#else
static QDF_STATUS dp_peer_find_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = soc->max_peers;
	hash_elems *= DP_PEER_HASH_LOAD_MULT;
	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->peer_hash.mask = hash_elems - 1;
	soc->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	soc->peer_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
	if (!soc->peer_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->peer_hash.bins[i]);

	qdf_spinlock_create(&soc->peer_hash_lock);
	return QDF_STATUS_SUCCESS;
}

static void dp_peer_find_hash_detach(struct dp_soc *soc)
{
	if (soc->peer_hash.bins) {
		qdf_mem_free(soc->peer_hash.bins);
		soc->peer_hash.bins = NULL;
		qdf_spinlock_destroy(&soc->peer_hash_lock);
	}
}

void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		qdf_spin_unlock_bh(&soc->peer_hash_lock);
		return;
	}

	/*
	 * It is important to add the new peer at the tail of the peer list
	 * with the bin index. Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);

	qdf_spin_unlock_bh(&soc->peer_hash_lock);
}

void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
{
	unsigned index;
	struct dp_peer *tmppeer = NULL;
	int found = 0;

	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
	/* Check if tail is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));

	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}
	QDF_ASSERT(found);
	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);

	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
}

#endif /* WLAN_FEATURE_11BE_MLO */
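
/*
 * Illustrative note (not part of the driver): dp_peer_find_hash_add()
 * takes a DP_MOD_ID_CONFIG reference on the peer and
 * dp_peer_find_hash_remove() drops it, so the two must always be
 * called in matched pairs around the peer's lifetime:
 *
 *	dp_peer_find_hash_add(soc, peer);
 *	...
 *	dp_peer_find_hash_remove(soc, peer);
 */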

void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
			   struct dp_peer *peer)
{
	/* only link peer will be added to vdev peer list */
	if (IS_MLO_DP_MLD_PEER(peer))
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw));
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		return;
	}

	/* add this peer into the vdev's list */
	if (wlan_op_mode_sta == vdev->opmode)
		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
	else
		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);

	vdev->num_peers++;
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

void dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
			      struct dp_peer *peer)
{
	uint8_t found = 0;
	struct dp_peer *tmppeer = NULL;

	/* only link peer will be added to vdev peer list */
	if (IS_MLO_DP_MLD_PEER(peer))
		return;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
		if (tmppeer == peer) {
			found = 1;
			break;
		}
	}

	if (found) {
		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
			     peer_list_elem);
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		vdev->num_peers--;
	} else {
		/* Ignoring the remove operation as peer not found */
		dp_peer_debug("%pK: peer:%pK not found in vdev:%pK peerlist:%pK",
			      soc, peer, vdev, &peer->vdev->peer_list);
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
}

void dp_txrx_peer_attach_add(struct dp_soc *soc,
			     struct dp_peer *peer,
			     struct dp_txrx_peer *txrx_peer)
{
	qdf_spin_lock_bh(&soc->peer_map_lock);

	peer->txrx_peer = txrx_peer;
	txrx_peer->bss_peer = peer->bss_peer;

	if (peer->peer_id == HTT_INVALID_PEER) {
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	txrx_peer->peer_id = peer->peer_id;

	QDF_ASSERT(soc->peer_id_to_obj_map[peer->peer_id]);

	qdf_spin_unlock_bh(&soc->peer_map_lock);
}

void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
				struct dp_peer *peer,
				uint16_t peer_id)
{
	QDF_ASSERT(peer_id <= soc->max_peer_id);

	qdf_spin_lock_bh(&soc->peer_map_lock);

	peer->peer_id = peer_id;

	if (QDF_IS_STATUS_ERROR(dp_peer_get_ref(soc, peer, DP_MOD_ID_CONFIG))) {
		dp_err("unable to get peer ref at MAP mac: "QDF_MAC_ADDR_FMT" peer_id %u",
		       QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id);
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}

	if (!soc->peer_id_to_obj_map[peer_id]) {
		soc->peer_id_to_obj_map[peer_id] = peer;
		if (peer->txrx_peer)
			peer->txrx_peer->peer_id = peer_id;
	} else {
		/* A peer map event came for a peer_id which is
		 * already mapped; this is not expected.
		 */
		dp_err("peer %pK(" QDF_MAC_ADDR_FMT ")map failed, id %d mapped "
		       "to peer %pK, Stats: peer(map %u unmap %u "
		       "invalid unmap %u) mld peer(map %u unmap %u)",
		       peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer_id,
		       soc->peer_id_to_obj_map[peer_id],
		       soc->stats.t2h_msg_stats.peer_map,
		       (soc->stats.t2h_msg_stats.peer_unmap -
			soc->stats.t2h_msg_stats.ml_peer_unmap),
		       soc->stats.t2h_msg_stats.invalid_peer_unmap,
		       soc->stats.t2h_msg_stats.ml_peer_map,
		       soc->stats.t2h_msg_stats.ml_peer_unmap);
		dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
		qdf_assert_always(0);
	}
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}

void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
				   uint16_t peer_id)
{
	struct dp_peer *peer = NULL;
	QDF_ASSERT(peer_id <= soc->max_peer_id);

	qdf_spin_lock_bh(&soc->peer_map_lock);
	peer = soc->peer_id_to_obj_map[peer_id];
	if (!peer) {
		dp_err("unable to get peer during peer id obj map remove");
		qdf_spin_unlock_bh(&soc->peer_map_lock);
		return;
	}
	peer->peer_id = HTT_INVALID_PEER;
	if (peer->txrx_peer)
		peer->txrx_peer->peer_id = HTT_INVALID_PEER;
	soc->peer_id_to_obj_map[peer_id] = NULL;
	dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
	qdf_spin_unlock_bh(&soc->peer_map_lock);
}
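
/*
 * Illustrative lifecycle sketch (not part of the driver): entries in
 * the id-to-object map are driven by the target's peer map/unmap
 * events, so a typical event-handler pairing looks like:
 *
 *	on HTT peer map:   dp_peer_find_id_to_obj_add(soc, peer, peer_id);
 *	on HTT peer unmap: dp_peer_find_id_to_obj_remove(soc, peer_id);
 *
 * The add takes a DP_MOD_ID_CONFIG reference that the remove releases.
 */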

#ifdef FEATURE_MEC
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	int log2, hash_elems, i;

	log2 = dp_log2_ceil(DP_PEER_MAX_MEC_IDX);
	hash_elems = 1 << log2;

	soc->mec_hash.mask = hash_elems - 1;
	soc->mec_hash.idx_bits = log2;

	dp_peer_info("%pK: max mec index: %d",
		     soc, DP_PEER_MAX_MEC_IDX);

	/* allocate an array of TAILQ mec object lists */
	soc->mec_hash.bins = qdf_mem_malloc(hash_elems *
					    sizeof(TAILQ_HEAD(anonymous_tail_q,
							      dp_mec_entry)));

	if (!soc->mec_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->mec_hash.bins[i]);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_mec_hash_index() - Compute the MEC hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: MEC hash
 */
static inline uint32_t dp_peer_mec_hash_index(struct dp_soc *soc,
					      union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->mec_hash.idx_bits;
	index &= soc->mec_hash.mask;
	return index;
}

struct dp_mec_entry *dp_peer_mec_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t pdev_id,
						     uint8_t *mec_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_mec_entry *mecentry;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     mec_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_mec_hash_index(soc, mac_addr);
	TAILQ_FOREACH(mecentry, &soc->mec_hash.bins[index], hash_list_elem) {
		if ((pdev_id == mecentry->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &mecentry->mac_addr))
			return mecentry;
	}

	return NULL;
}

/**
 * dp_peer_mec_hash_add() - Add MEC entry into hash table
 * @soc: SoC handle
 * @mecentry: MEC entry
 *
 * This function adds the MEC entry into SoC MEC hash table
 *
 * Return: None
 */
static inline void dp_peer_mec_hash_add(struct dp_soc *soc,
					struct dp_mec_entry *mecentry)
{
	uint32_t index;

	index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);
	qdf_spin_lock_bh(&soc->mec_lock);
	TAILQ_INSERT_TAIL(&soc->mec_hash.bins[index], mecentry, hash_list_elem);
	qdf_spin_unlock_bh(&soc->mec_lock);
}

QDF_STATUS dp_peer_mec_add_entry(struct dp_soc *soc,
				 struct dp_vdev *vdev,
				 uint8_t *mac_addr)
{
	struct dp_mec_entry *mecentry = NULL;
	struct dp_pdev *pdev = NULL;

	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		return QDF_STATUS_E_INVAL;
	}

	pdev = vdev->pdev;

	if (qdf_unlikely(qdf_atomic_read(&soc->mec_cnt) >=
			 DP_PEER_MAX_MEC_ENTRY)) {
		dp_peer_warn("%pK: max MEC entry limit reached mac_addr: "
			     QDF_MAC_ADDR_FMT, soc, QDF_MAC_ADDR_REF(mac_addr));
		return QDF_STATUS_E_NOMEM;
	}

	qdf_spin_lock_bh(&soc->mec_lock);
	mecentry = dp_peer_mec_hash_find_by_pdevid(soc, pdev->pdev_id,
						   mac_addr);
	if (qdf_likely(mecentry)) {
		mecentry->is_active = TRUE;
		qdf_spin_unlock_bh(&soc->mec_lock);
		return QDF_STATUS_E_ALREADY;
	}

	qdf_spin_unlock_bh(&soc->mec_lock);

	dp_peer_debug("%pK: pdevid: %u vdev: %u type: MEC mac_addr: "
		      QDF_MAC_ADDR_FMT,
		      soc, pdev->pdev_id, vdev->vdev_id,
		      QDF_MAC_ADDR_REF(mac_addr));

	mecentry = (struct dp_mec_entry *)
			qdf_mem_malloc(sizeof(struct dp_mec_entry));

	if (qdf_unlikely(!mecentry)) {
		dp_peer_err("%pK: fail to allocate mecentry", soc);
		return QDF_STATUS_E_NOMEM;
	}

	qdf_copy_macaddr((struct qdf_mac_addr *)&mecentry->mac_addr.raw[0],
			 (struct qdf_mac_addr *)mac_addr);
	mecentry->pdev_id = pdev->pdev_id;
	mecentry->vdev_id = vdev->vdev_id;
	mecentry->is_active = TRUE;
	dp_peer_mec_hash_add(soc, mecentry);

	qdf_atomic_inc(&soc->mec_cnt);
	DP_STATS_INC(soc, mec.added, 1);

	return QDF_STATUS_SUCCESS;
}
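
/*
 * Illustrative usage sketch (not part of the driver): a caller on the
 * multicast-echo detection path (hypothetical context) would do:
 *
 *	QDF_STATUS ret = dp_peer_mec_add_entry(soc, vdev, src_mac);
 *
 * QDF_STATUS_E_ALREADY means the entry already existed and was only
 * re-marked active; QDF_STATUS_E_NOMEM covers both the entry-count
 * limit and allocation failure.
 */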

void dp_peer_mec_detach_entry(struct dp_soc *soc, struct dp_mec_entry *mecentry,
			      void *ptr)
{
	uint32_t index = dp_peer_mec_hash_index(soc, &mecentry->mac_addr);

	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;

	TAILQ_REMOVE(&soc->mec_hash.bins[index], mecentry,
		     hash_list_elem);
	TAILQ_INSERT_TAIL(free_list, mecentry, hash_list_elem);
}

void dp_peer_mec_free_list(struct dp_soc *soc, void *ptr)
{
	struct dp_mec_entry *mecentry, *mecentry_next;

	TAILQ_HEAD(, dp_mec_entry) * free_list = ptr;

	TAILQ_FOREACH_SAFE(mecentry, free_list, hash_list_elem,
			   mecentry_next) {
		dp_peer_debug("%pK: MEC delete for mac_addr " QDF_MAC_ADDR_FMT,
			      soc, QDF_MAC_ADDR_REF(&mecentry->mac_addr));
		qdf_mem_free(mecentry);
		qdf_atomic_dec(&soc->mec_cnt);
		DP_STATS_INC(soc, mec.deleted, 1);
	}
}
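
/*
 * Illustrative sketch of the intended two-phase teardown (not part of
 * the driver): detach entries onto a local list while holding
 * soc->mec_lock, then free them outside the lock.
 *
 *	TAILQ_HEAD(, dp_mec_entry) free_list;
 *	TAILQ_INIT(&free_list);
 *
 *	qdf_spin_lock_bh(&soc->mec_lock);
 *	dp_peer_mec_detach_entry(soc, mecentry, &free_list);
 *	qdf_spin_unlock_bh(&soc->mec_lock);
 *
 *	dp_peer_mec_free_list(soc, &free_list);
 */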

void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
	dp_peer_mec_flush_entries(soc);
	qdf_mem_free(soc->mec_hash.bins);
	soc->mec_hash.bins = NULL;
}

void dp_peer_mec_spinlock_destroy(struct dp_soc *soc)
{
	qdf_spinlock_destroy(&soc->mec_lock);
}

void dp_peer_mec_spinlock_create(struct dp_soc *soc)
{
	qdf_spinlock_create(&soc->mec_lock);
}
#else
QDF_STATUS dp_peer_mec_hash_attach(struct dp_soc *soc)
{
	return QDF_STATUS_SUCCESS;
}

void dp_peer_mec_hash_detach(struct dp_soc *soc)
{
}
#endif

#ifdef FEATURE_AST
#ifdef WLAN_FEATURE_11BE_MLO
/**
 * dp_peer_exist_on_pdev() - check if peer with mac address exist on pdev
 *
 * @soc: Datapath SOC handle
 * @peer_mac_addr: peer mac address
 * @mac_addr_is_aligned: is mac address aligned
 * @pdev: Datapath PDEV handle
 *
 * Return: true if peer found else return false
 */
static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  struct dp_pdev *pdev)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	bool found = false;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->pdev == pdev)) {
			found = true;
			break;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);

	if (found)
		return found;

	peer = dp_mld_peer_find_hash_find(soc, peer_mac_addr,
					  mac_addr_is_aligned, DP_VDEV_ALL,
					  DP_MOD_ID_CDP);
	if (peer) {
		if (peer->vdev->pdev == pdev)
			found = true;
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	}

	return found;
}
#else
static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
				  uint8_t *peer_mac_addr,
				  int mac_addr_is_aligned,
				  struct dp_pdev *pdev)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct dp_peer *peer;
	bool found = false;

	if (mac_addr_is_aligned) {
		mac_addr = (union dp_align_mac_addr *)peer_mac_addr;
	} else {
		qdf_mem_copy(
			&local_mac_addr_aligned.raw[0],
			peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = dp_peer_find_hash_index(soc, mac_addr);
	qdf_spin_lock_bh(&soc->peer_hash_lock);
	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
		    (peer->vdev->pdev == pdev)) {
			found = true;
			break;
		}
	}
	qdf_spin_unlock_bh(&soc->peer_hash_lock);
	return found;
}
#endif /* WLAN_FEATURE_11BE_MLO */

QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
{
	int i, hash_elems, log2;
	unsigned int max_ast_idx = wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx);

	hash_elems = ((max_ast_idx * DP_AST_HASH_LOAD_MULT) >>
		      DP_AST_HASH_LOAD_SHIFT);

	log2 = dp_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	soc->ast_hash.mask = hash_elems - 1;
	soc->ast_hash.idx_bits = log2;

	dp_peer_info("%pK: ast hash_elems: %d, max_ast_idx: %d",
		     soc, hash_elems, max_ast_idx);

	/* allocate an array of TAILQ peer object lists */
	soc->ast_hash.bins = qdf_mem_malloc(
		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q,
						dp_ast_entry)));

	if (!soc->ast_hash.bins)
		return QDF_STATUS_E_NOMEM;

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&soc->ast_hash.bins[i]);

	return QDF_STATUS_SUCCESS;
}

/**
 * dp_peer_ast_cleanup() - cleanup the references
 * @soc: SoC handle
 * @ast: ast entry
 *
 * Return: None
 */
static inline void dp_peer_ast_cleanup(struct dp_soc *soc,
				       struct dp_ast_entry *ast)
{
	txrx_ast_free_cb cb = ast->callback;
	void *cookie = ast->cookie;

	dp_peer_debug("mac_addr: " QDF_MAC_ADDR_FMT ", cb: %pK, cookie: %pK",
		      QDF_MAC_ADDR_REF(ast->mac_addr.raw), cb, cookie);

	/* Call the callbacks to free up the cookie */
	if (cb) {
		ast->callback = NULL;
		ast->cookie = NULL;
		cb(soc->ctrl_psoc,
		   dp_soc_to_cdp_soc(soc),
		   cookie,
		   CDP_TXRX_AST_DELETE_IN_PROGRESS);
	}
}

void dp_peer_ast_hash_detach(struct dp_soc *soc)
{
	unsigned int index;
	struct dp_ast_entry *ast, *ast_next;

	if (!soc->ast_hash.mask)
		return;

	if (!soc->ast_hash.bins)
		return;

	dp_peer_debug("%pK: num_ast_entries: %u", soc, soc->num_ast_entries);

	qdf_spin_lock_bh(&soc->ast_lock);
	for (index = 0; index <= soc->ast_hash.mask; index++) {
		if (!TAILQ_EMPTY(&soc->ast_hash.bins[index])) {
			TAILQ_FOREACH_SAFE(ast, &soc->ast_hash.bins[index],
					   hash_list_elem, ast_next) {
				TAILQ_REMOVE(&soc->ast_hash.bins[index], ast,
					     hash_list_elem);
				dp_peer_ast_cleanup(soc, ast);
				soc->num_ast_entries--;
				qdf_mem_free(ast);
			}
		}
	}
	qdf_spin_unlock_bh(&soc->ast_lock);

	qdf_mem_free(soc->ast_hash.bins);
	soc->ast_hash.bins = NULL;
}

/**
 * dp_peer_ast_hash_index() - Compute the AST hash from MAC address
 * @soc: SoC handle
 * @mac_addr: MAC address
 *
 * Return: AST hash
 */
static inline uint32_t dp_peer_ast_hash_index(struct dp_soc *soc,
					      union dp_align_mac_addr *mac_addr)
{
	uint32_t index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^
		mac_addr->align2.bytes_ef;
	index ^= index >> soc->ast_hash.idx_bits;
	index &= soc->ast_hash.mask;
	return index;
}

/**
 * dp_peer_ast_hash_add() - Add AST entry into hash table
 * @soc: SoC handle
 * @ase: AST entry
 *
 * This function adds the AST entry into SoC AST hash table
 * It assumes caller has taken the ast lock to protect the access to this table
 *
 * Return: None
 */
static inline void dp_peer_ast_hash_add(struct dp_soc *soc,
					struct dp_ast_entry *ase)
{
	uint32_t index;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	TAILQ_INSERT_TAIL(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

void dp_peer_ast_hash_remove(struct dp_soc *soc,
			     struct dp_ast_entry *ase)
{
	unsigned index;
	struct dp_ast_entry *tmpase;
	int found = 0;

	if (soc->ast_offload_support && !soc->host_ast_db_enable)
		return;

	index = dp_peer_ast_hash_index(soc, &ase->mac_addr);
	/* Check if tail is not empty before delete */
	QDF_ASSERT(!TAILQ_EMPTY(&soc->ast_hash.bins[index]));

	dp_peer_debug("ID: %u idx: %u mac_addr: " QDF_MAC_ADDR_FMT,
		      ase->peer_id, index, QDF_MAC_ADDR_REF(ase->mac_addr.raw));

	TAILQ_FOREACH(tmpase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (tmpase == ase) {
			found = 1;
			break;
		}
	}

	QDF_ASSERT(found);

	if (found)
		TAILQ_REMOVE(&soc->ast_hash.bins[index], ase, hash_list_elem);
}

struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t vdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((vdev_id == ase->vdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
						     uint8_t *ast_mac_addr,
						     uint8_t pdev_id)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	uint32_t index;
	struct dp_ast_entry *ase;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if ((pdev_id == ase->pdev_id) &&
		    !dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr)) {
			return ase;
		}
	}

	return NULL;
}

struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
					       uint8_t *ast_mac_addr)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	if (!soc->ast_hash.bins)
		return NULL;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0) {
			return ase;
		}
	}

	return NULL;
}

struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
					struct dp_soc *soc,
					uint8_t *ast_mac_addr,
					enum cdp_txrx_ast_entry_type type)
{
	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
	unsigned index;
	struct dp_ast_entry *ase;

	if (!soc->ast_hash.bins)
		return NULL;

	qdf_mem_copy(&local_mac_addr_aligned.raw[0],
		     ast_mac_addr, QDF_MAC_ADDR_SIZE);
	mac_addr = &local_mac_addr_aligned;

	index = dp_peer_ast_hash_index(soc, mac_addr);
	TAILQ_FOREACH(ase, &soc->ast_hash.bins[index], hash_list_elem) {
		if (dp_peer_find_mac_addr_cmp(mac_addr, &ase->mac_addr) == 0 &&
		    ase->type == type) {
			return ase;
		}
	}

	return NULL;
}
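
/*
 * Illustrative note (not part of the driver): the AST find helpers
 * above walk the hash bins without taking soc->ast_lock themselves,
 * so the caller is expected to hold it across the lookup and any use
 * of the returned entry, as dp_find_peer_by_macaddr() does:
 *
 *	qdf_spin_lock_bh(&soc->ast_lock);
 *	ase = dp_peer_ast_hash_find_soc(soc, mac_addr);
 *	if (ase)
 *		... inspect ase while the lock is held ...
 *	qdf_spin_unlock_bh(&soc->ast_lock);
 */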

/**
 * dp_peer_map_ipa_evt() - Send peer map event to IPA
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @ast_entry: AST entry
 * @mac_addr: MAC address of ast node
 *
 * Return: None
 */
#if defined(IPA_OFFLOAD) && defined(QCA_IPA_LL_TX_FLOW_CONTROL)
static inline
void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
{
	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
			soc->ctrl_psoc, ast_entry->peer_id,
			ast_entry->ast_idx, ast_entry->vdev_id,
			mac_addr, ast_entry->type, ast_entry->ast_hash_value);
		}
	} else {
		dp_peer_info("%pK: AST entry not found", soc);
	}
}

/**
 * dp_peer_unmap_ipa_evt() - Send peer unmap event to IPA
 * @soc: SoC handle
 * @peer_id: Peer id
 * @vdev_id: Vdev id
 * @mac_addr: Peer mac address
 *
 * Return: None
 */
static inline
void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t vdev_id, uint8_t *mac_addr)
{
	if (soc->cdp_soc.ol_ops->peer_unmap_event) {
		soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
						      peer_id, vdev_id,
						      mac_addr);
	}
}
#else
static inline
void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
			   uint8_t vdev_id, uint8_t *mac_addr)
{
}

static inline
void dp_peer_map_ipa_evt(struct dp_soc *soc, struct dp_peer *peer,
			 struct dp_ast_entry *ast_entry, uint8_t *mac_addr)
{
}
#endif

QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
				    uint8_t *mac_addr, uint16_t hw_peer_id,
				    uint8_t vdev_id, uint16_t ast_hash,
				    uint8_t is_wds)
{
	struct dp_vdev *vdev;
	struct dp_ast_entry *ast_entry;
	enum cdp_txrx_ast_entry_type type;
	struct dp_peer *peer;
	struct dp_peer *old_peer;
	QDF_STATUS status = QDF_STATUS_SUCCESS;

	if (is_wds)
		type = CDP_TXRX_AST_TYPE_WDS;
	else
		type = CDP_TXRX_AST_TYPE_STATIC;

	peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
	if (!peer) {
		dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
			     soc, peer_id,
			     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
		return QDF_STATUS_E_INVAL;
	}

	if (!is_wds && IS_MLO_DP_MLD_PEER(peer))
		type = CDP_TXRX_AST_TYPE_MLD;

	vdev = peer->vdev;
	if (!vdev) {
		dp_peer_err("%pK: Peers vdev is NULL", soc);
		status = QDF_STATUS_E_INVAL;
		goto fail;
	}

	if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
		if (type != CDP_TXRX_AST_TYPE_STATIC &&
		    type != CDP_TXRX_AST_TYPE_MLD &&
		    type != CDP_TXRX_AST_TYPE_SELF) {
			status = QDF_STATUS_E_BUSY;
			goto fail;
		}
	}

	dp_peer_debug("%pK: vdev: %u ast_entry->type: %d peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
		      soc, vdev->vdev_id, type,
		      QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
		      QDF_MAC_ADDR_REF(mac_addr));

	/*
	 * In an MLO scenario, the same MAC address may appear as both
	 * a link MAC address and the MLD MAC address.
	 * A duplicate AST map needs to be handled for the non-MLD type.
	 */
	qdf_spin_lock_bh(&soc->ast_lock);
	ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
	if (ast_entry && type != CDP_TXRX_AST_TYPE_MLD) {
		dp_peer_debug("AST present ID %d vid %d mac " QDF_MAC_ADDR_FMT,
			      hw_peer_id, vdev_id,
			      QDF_MAC_ADDR_REF(mac_addr));

		old_peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
						   DP_MOD_ID_AST);
		if (!old_peer) {
			dp_peer_info("Peer not found soc:%pK: peer_id %d, peer_mac " QDF_MAC_ADDR_FMT ", vdev_id %d",
				     soc, ast_entry->peer_id,
				     QDF_MAC_ADDR_REF(mac_addr), vdev_id);
			qdf_spin_unlock_bh(&soc->ast_lock);
			status = QDF_STATUS_E_INVAL;
			goto fail;
		}

		dp_peer_unlink_ast_entry(soc, ast_entry, old_peer);
		dp_peer_free_ast_entry(soc, ast_entry);
		if (old_peer)
			dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
	}

	ast_entry = (struct dp_ast_entry *)
			qdf_mem_malloc(sizeof(struct dp_ast_entry));
	if (!ast_entry) {
		dp_peer_err("%pK: fail to allocate ast_entry", soc);
		qdf_spin_unlock_bh(&soc->ast_lock);
		QDF_ASSERT(0);
		status = QDF_STATUS_E_NOMEM;
		goto fail;
	}

	qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
	ast_entry->pdev_id = vdev->pdev->pdev_id;
	ast_entry->is_mapped = false;
	ast_entry->delete_in_progress = false;
	ast_entry->next_hop = 0;
	ast_entry->vdev_id = vdev->vdev_id;
	ast_entry->type = type;

	switch (type) {
	case CDP_TXRX_AST_TYPE_STATIC:
		if (peer->vdev->opmode == wlan_op_mode_sta)
			ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
		break;
	case CDP_TXRX_AST_TYPE_WDS:
		ast_entry->next_hop = 1;
		break;
	case CDP_TXRX_AST_TYPE_MLD:
		break;
	default:
		dp_peer_alert("%pK: Incorrect AST entry type", soc);
	}

	ast_entry->is_active = TRUE;
	DP_STATS_INC(soc, ast.added, 1);
	soc->num_ast_entries++;
	dp_peer_ast_hash_add(soc, ast_entry);

	ast_entry->ast_idx = hw_peer_id;
	ast_entry->ast_hash_value = ast_hash;
	ast_entry->peer_id = peer_id;
	TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
			  ase_list_elem);

	dp_peer_map_ipa_evt(soc, peer, ast_entry, mac_addr);

	qdf_spin_unlock_bh(&soc->ast_lock);
fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

	return status;
}

/**
 * dp_peer_map_ast() - Map the ast entry with HW AST Index
 * @soc: SoC handle
 * @peer: peer to which ast node belongs
 * @mac_addr: MAC address of ast node
 * @hw_peer_id: HW AST Index returned by target in peer map event
 * @vdev_id: vdev id for VAP to which the peer belongs to
 * @ast_hash: ast hash value in HW
 * @is_wds: flag to indicate peer map event for WDS ast entry
 *
 * Return: QDF_STATUS code
 */
static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
					 struct dp_peer *peer,
					 uint8_t *mac_addr,
					 uint16_t hw_peer_id,
					 uint8_t vdev_id,
					 uint16_t ast_hash,
					 uint8_t is_wds)
{
	struct dp_ast_entry *ast_entry = NULL;
	enum cdp_txrx_ast_entry_type peer_type = CDP_TXRX_AST_TYPE_STATIC;
	void *cookie = NULL;
	txrx_ast_free_cb cb = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (soc->ast_offload_support && !wlan_cfg_get_dp_soc_dpdk_cfg(soc->ctrl_psoc))
		return QDF_STATUS_SUCCESS;

	dp_peer_err("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
		    soc, peer, hw_peer_id, vdev_id,
		    QDF_MAC_ADDR_REF(mac_addr));

	qdf_spin_lock_bh(&soc->ast_lock);

	ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);

	if (is_wds) {
		/*
		 * While processing the peer map for an AST entry, if the
		 * next-hop peer has been deleted, free the AST entry as it
		 * is not attached to a peer yet.
		 */
		if (!peer) {
			if (ast_entry)
				dp_peer_free_ast_entry(soc, ast_entry);

			qdf_spin_unlock_bh(&soc->ast_lock);

			dp_peer_alert("Peer is NULL for WDS entry mac "
				      QDF_MAC_ADDR_FMT " ",
				      QDF_MAC_ADDR_REF(mac_addr));
			return QDF_STATUS_E_INVAL;
		}
		/*
		 * Certain cases, such as an auth attack on a repeater, can
		 * cause the number of ast_entries falling in the same hash
		 * bucket to exceed the max_skid length supported by HW in
		 * the root AP. In these cases the FW returns the hw_peer_id
		 * (ast_index) as 0xffff, indicating HW could not add the
		 * entry to its table. The host has to delete the entry from
		 * its table in these cases.
		 */
		if (hw_peer_id == HTT_INVALID_PEER) {
			DP_STATS_INC(soc, ast.map_err, 1);
			if (ast_entry) {
				if (ast_entry->is_mapped) {
					soc->ast_table[ast_entry->ast_idx] =
						NULL;
				}

				cb = ast_entry->callback;
				cookie = ast_entry->cookie;
				peer_type = ast_entry->type;

				dp_peer_unlink_ast_entry(soc, ast_entry, peer);
				dp_peer_free_ast_entry(soc, ast_entry);

				qdf_spin_unlock_bh(&soc->ast_lock);

				if (cb) {
					cb(soc->ctrl_psoc,
					   dp_soc_to_cdp_soc(soc),
					   cookie,
					   CDP_TXRX_AST_DELETED);
				}
			} else {
				qdf_spin_unlock_bh(&soc->ast_lock);
				dp_peer_alert("AST entry not found with peer %pK peer_id %u peer_mac " QDF_MAC_ADDR_FMT " mac_addr " QDF_MAC_ADDR_FMT " vdev_id %u next_hop %u",
					      peer, peer->peer_id,
					      QDF_MAC_ADDR_REF(peer->mac_addr.raw),
					      QDF_MAC_ADDR_REF(mac_addr),
					      vdev_id, is_wds);
			}
			err = QDF_STATUS_E_INVAL;

			dp_hmwds_ast_add_notify(peer, mac_addr,
						peer_type, err, true);

			return err;
		}
	}

	if (!peer) {
		qdf_spin_unlock_bh(&soc->ast_lock);
		dp_peer_alert("Peer is NULL for mac " QDF_MAC_ADDR_FMT " ",
			      QDF_MAC_ADDR_REF(mac_addr));
		return QDF_STATUS_E_INVAL;
	}

	if (ast_entry) {
		ast_entry->ast_idx = hw_peer_id;
		soc->ast_table[hw_peer_id] = ast_entry;
		ast_entry->is_active = TRUE;
		peer_type = ast_entry->type;
		ast_entry->ast_hash_value = ast_hash;
		ast_entry->is_mapped = TRUE;
		qdf_assert_always(ast_entry->peer_id == HTT_INVALID_PEER);

		ast_entry->peer_id = peer->peer_id;
		TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
				  ase_list_elem);
	}

	if (ast_entry || (peer->vdev && peer->vdev->proxysta_vdev) ||
	    wlan_cfg_get_dp_soc_dpdk_cfg(soc->ctrl_psoc)) {
		if (soc->cdp_soc.ol_ops->peer_map_event) {
			soc->cdp_soc.ol_ops->peer_map_event(
			soc->ctrl_psoc, peer->peer_id,
			hw_peer_id, vdev_id,
			mac_addr, peer_type, ast_hash);
		}
	} else {
		dp_peer_err("%pK: AST entry not found", soc);
		err = QDF_STATUS_E_NOENT;
	}

	qdf_spin_unlock_bh(&soc->ast_lock);

	dp_hmwds_ast_add_notify(peer, mac_addr,
				peer_type, err, true);

	return err;
}

void dp_peer_free_hmwds_cb(struct cdp_ctrl_objmgr_psoc *ctrl_psoc,
			   struct cdp_soc *dp_soc,
			   void *cookie,
			   enum cdp_ast_free_status status)
{
	struct dp_ast_free_cb_params *param =
		(struct dp_ast_free_cb_params *)cookie;
	struct dp_soc *soc = (struct dp_soc *)dp_soc;
	struct dp_peer *peer = NULL;
	QDF_STATUS err = QDF_STATUS_SUCCESS;

	if (status != CDP_TXRX_AST_DELETED) {
		qdf_mem_free(cookie);
		return;
	}

	peer = dp_peer_find_hash_find(soc, &param->peer_mac_addr.raw[0],
				      0, param->vdev_id, DP_MOD_ID_AST);
	if (peer) {
		err = dp_peer_add_ast(soc, peer,
				      &param->mac_addr.raw[0],
				      param->type,
				      param->flags);

		dp_hmwds_ast_add_notify(peer, &param->mac_addr.raw[0],
					param->type, err, false);

		dp_peer_unref_delete(peer, DP_MOD_ID_AST);
	}
	qdf_mem_free(cookie);
}
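
/*
 * Illustrative flow note (not part of the driver): for an HMWDS add
 * that collides with an existing AST entry, dp_peer_add_ast() below
 * stashes the requested parameters in a dp_ast_free_cb_params cookie,
 * deletes the old entry, and registers dp_peer_free_hmwds_cb() as the
 * free callback. Once the target confirms the delete
 * (CDP_TXRX_AST_DELETED), the callback re-issues dp_peer_add_ast()
 * with the saved parameters, completing the deferred add.
 */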
1692
dp_peer_add_ast(struct dp_soc * soc,struct dp_peer * peer,uint8_t * mac_addr,enum cdp_txrx_ast_entry_type type,uint32_t flags)1693 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
1694 struct dp_peer *peer,
1695 uint8_t *mac_addr,
1696 enum cdp_txrx_ast_entry_type type,
1697 uint32_t flags)
1698 {
1699 struct dp_ast_entry *ast_entry = NULL;
1700 struct dp_vdev *vdev = NULL;
1701 struct dp_pdev *pdev = NULL;
1702 txrx_ast_free_cb cb = NULL;
1703 void *cookie = NULL;
1704 struct dp_peer *vap_bss_peer = NULL;
1705 bool is_peer_found = false;
1706 int status = 0;
1707
1708 if (soc->ast_offload_support)
1709 return QDF_STATUS_E_INVAL;
1710
1711 vdev = peer->vdev;
1712 if (!vdev) {
1713 dp_peer_err("%pK: Peers vdev is NULL", soc);
1714 QDF_ASSERT(0);
1715 return QDF_STATUS_E_INVAL;
1716 }
1717
1718 pdev = vdev->pdev;
1719
1720 is_peer_found = dp_peer_exist_on_pdev(soc, mac_addr, 0, pdev);
1721
1722 qdf_spin_lock_bh(&soc->ast_lock);
1723
1724 if (!dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE)) {
1725 if ((type != CDP_TXRX_AST_TYPE_STATIC) &&
1726 (type != CDP_TXRX_AST_TYPE_SELF)) {
1727 qdf_spin_unlock_bh(&soc->ast_lock);
1728 return QDF_STATUS_E_BUSY;
1729 }
1730 }
1731
1732 dp_peer_debug("%pK: pdevid: %u vdev: %u ast_entry->type: %d flags: 0x%x peer_mac: " QDF_MAC_ADDR_FMT " peer: %pK mac " QDF_MAC_ADDR_FMT,
1733 soc, pdev->pdev_id, vdev->vdev_id, type, flags,
1734 QDF_MAC_ADDR_REF(peer->mac_addr.raw), peer,
1735 QDF_MAC_ADDR_REF(mac_addr));
1736
1737 /* fw supports only 2 times the max_peers ast entries */
1738 if (soc->num_ast_entries >=
1739 wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)) {
1740 qdf_spin_unlock_bh(&soc->ast_lock);
1741 dp_peer_err("%pK: Max ast entries reached", soc);
1742 return QDF_STATUS_E_RESOURCES;
1743 }
1744
1745 /* If AST entry already exists , just return from here
1746 * ast entry with same mac address can exist on different radios
1747 * if ast_override support is enabled use search by pdev in this
1748 * case
1749 */
1750 if (soc->ast_override_support) {
1751 ast_entry = dp_peer_ast_hash_find_by_pdevid(soc, mac_addr,
1752 pdev->pdev_id);
1753 if (ast_entry) {
1754 qdf_spin_unlock_bh(&soc->ast_lock);
1755 return QDF_STATUS_E_ALREADY;
1756 }
1757
1758 if (is_peer_found) {
1759 /* During WDS to static roaming, peer is added
1760 * to the list before static AST entry create.
1761 * So, allow AST entry for STATIC type
1762 * even if peer is present
1763 */
1764 if (type != CDP_TXRX_AST_TYPE_STATIC) {
1765 qdf_spin_unlock_bh(&soc->ast_lock);
1766 return QDF_STATUS_E_ALREADY;
1767 }
1768 }
1769 } else {
1770 /* For HWMWDS_SEC entries can be added for same mac address
1771 * do not check for existing entry
1772 */
1773 if (type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1774 goto add_ast_entry;
1775
1776 ast_entry = dp_peer_ast_hash_find_soc(soc, mac_addr);
1777
1778 if (ast_entry) {
1779 if ((ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM) &&
1780 !ast_entry->delete_in_progress) {
1781 qdf_spin_unlock_bh(&soc->ast_lock);
1782 return QDF_STATUS_E_ALREADY;
1783 }
1784
1785 /* An add for an HMWDS entry cannot be ignored if there
1786 * is an AST entry with the same mac address.
1787 *
1788 * If an ast entry exists with the requested mac address,
1789 * send a delete command and register a callback which
1790 * can take care of adding the HMWDS ast entry on delete
1791 * confirmation from the target.
1792 */
1793 if (type == CDP_TXRX_AST_TYPE_WDS_HM) {
1794 struct dp_ast_free_cb_params *param = NULL;
1795
1796 if (ast_entry->type ==
1797 CDP_TXRX_AST_TYPE_WDS_HM_SEC)
1798 goto add_ast_entry;
1799
1800 /* save existing callback */
1801 if (ast_entry->callback) {
1802 cb = ast_entry->callback;
1803 cookie = ast_entry->cookie;
1804 }
1805
1806 param = qdf_mem_malloc(sizeof(*param));
1807 if (!param) {
1808 QDF_TRACE(QDF_MODULE_ID_TXRX,
1809 QDF_TRACE_LEVEL_ERROR,
1810 "Allocation failed");
1811 qdf_spin_unlock_bh(&soc->ast_lock);
1812 return QDF_STATUS_E_NOMEM;
1813 }
1814
1815 qdf_mem_copy(&param->mac_addr.raw[0], mac_addr,
1816 QDF_MAC_ADDR_SIZE);
1817 qdf_mem_copy(&param->peer_mac_addr.raw[0],
1818 &peer->mac_addr.raw[0],
1819 QDF_MAC_ADDR_SIZE);
1820 param->type = type;
1821 param->flags = flags;
1822 param->vdev_id = vdev->vdev_id;
1823 ast_entry->callback = dp_peer_free_hmwds_cb;
1824 ast_entry->pdev_id = vdev->pdev->pdev_id;
1825 ast_entry->type = type;
1826 ast_entry->cookie = (void *)param;
1827 if (!ast_entry->delete_in_progress)
1828 dp_peer_del_ast(soc, ast_entry);
1829
1830 qdf_spin_unlock_bh(&soc->ast_lock);
1831
1832 /* Call the saved callback */
1833 if (cb) {
1834 cb(soc->ctrl_psoc,
1835 dp_soc_to_cdp_soc(soc),
1836 cookie,
1837 CDP_TXRX_AST_DELETE_IN_PROGRESS);
1838 }
1839 return QDF_STATUS_E_AGAIN;
1840 }
1841
1842 qdf_spin_unlock_bh(&soc->ast_lock);
1843 return QDF_STATUS_E_ALREADY;
1844 }
1845 }
1846
1847 add_ast_entry:
1848 ast_entry = (struct dp_ast_entry *)
1849 qdf_mem_malloc(sizeof(struct dp_ast_entry));
1850
1851 if (!ast_entry) {
1852 qdf_spin_unlock_bh(&soc->ast_lock);
1853 dp_peer_err("%pK: fail to allocate ast_entry", soc);
1854 QDF_ASSERT(0);
1855 return QDF_STATUS_E_NOMEM;
1856 }
1857
1858 qdf_mem_copy(&ast_entry->mac_addr.raw[0], mac_addr, QDF_MAC_ADDR_SIZE);
1859 ast_entry->pdev_id = vdev->pdev->pdev_id;
1860 ast_entry->is_mapped = false;
1861 ast_entry->delete_in_progress = false;
1862 ast_entry->peer_id = HTT_INVALID_PEER;
1863 ast_entry->next_hop = 0;
1864 ast_entry->vdev_id = vdev->vdev_id;
1865
1866 switch (type) {
1867 case CDP_TXRX_AST_TYPE_STATIC:
1868 peer->self_ast_entry = ast_entry;
1869 ast_entry->type = CDP_TXRX_AST_TYPE_STATIC;
1870 if (peer->vdev->opmode == wlan_op_mode_sta)
1871 ast_entry->type = CDP_TXRX_AST_TYPE_STA_BSS;
1872 break;
1873 case CDP_TXRX_AST_TYPE_SELF:
1874 peer->self_ast_entry = ast_entry;
1875 ast_entry->type = CDP_TXRX_AST_TYPE_SELF;
1876 break;
1877 case CDP_TXRX_AST_TYPE_WDS:
1878 ast_entry->next_hop = 1;
1879 ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
1880 break;
1881 case CDP_TXRX_AST_TYPE_WDS_HM:
1882 ast_entry->next_hop = 1;
1883 ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM;
1884 break;
1885 case CDP_TXRX_AST_TYPE_WDS_HM_SEC:
1886 ast_entry->next_hop = 1;
1887 ast_entry->type = CDP_TXRX_AST_TYPE_WDS_HM_SEC;
1888 ast_entry->peer_id = peer->peer_id;
1889 TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry,
1890 ase_list_elem);
1891 break;
1892 case CDP_TXRX_AST_TYPE_DA:
1893 vap_bss_peer = dp_vdev_bss_peer_ref_n_get(soc, vdev,
1894 DP_MOD_ID_AST);
1895 if (!vap_bss_peer) {
1896 qdf_spin_unlock_bh(&soc->ast_lock);
1897 qdf_mem_free(ast_entry);
1898 return QDF_STATUS_E_FAILURE;
1899 }
1900 peer = vap_bss_peer;
1901 ast_entry->next_hop = 1;
1902 ast_entry->type = CDP_TXRX_AST_TYPE_DA;
1903 break;
1904 default:
1905 dp_peer_err("%pK: Incorrect AST entry type", soc);
1906 }
1907
1908 ast_entry->is_active = TRUE;
1909 DP_STATS_INC(soc, ast.added, 1);
1910 soc->num_ast_entries++;
1911 dp_peer_ast_hash_add(soc, ast_entry);
1912
1913 if ((ast_entry->type != CDP_TXRX_AST_TYPE_STATIC) &&
1914 (ast_entry->type != CDP_TXRX_AST_TYPE_SELF) &&
1915 (ast_entry->type != CDP_TXRX_AST_TYPE_STA_BSS) &&
1916 (ast_entry->type != CDP_TXRX_AST_TYPE_WDS_HM_SEC))
1917 status = dp_add_wds_entry_wrapper(soc,
1918 peer,
1919 mac_addr,
1920 flags,
1921 ast_entry->type);
1922
1923 if (vap_bss_peer)
1924 dp_peer_unref_delete(vap_bss_peer, DP_MOD_ID_AST);
1925
1926 qdf_spin_unlock_bh(&soc->ast_lock);
1927 return qdf_status_from_os_return(status);
1928 }
1929
1930 qdf_export_symbol(dp_peer_add_ast);
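
/*
 * Usage sketch (illustrative only, not compiled into the driver; the
 * caller name is hypothetical): a WDS source-port-learning path would
 * normally treat the "soft" return codes of dp_peer_add_ast() as
 * non-fatal.
 */
#if 0
static void example_wds_learn(struct dp_soc *soc, struct dp_peer *peer,
			      uint8_t *sa_mac)
{
	QDF_STATUS status;

	status = dp_peer_add_ast(soc, peer, sa_mac,
				 CDP_TXRX_AST_TYPE_WDS, 0);
	switch (status) {
	case QDF_STATUS_E_ALREADY:	/* entry already present */
	case QDF_STATUS_E_AGAIN:	/* re-add deferred to free cb */
	case QDF_STATUS_E_BUSY:		/* peer not yet ACTIVE */
		break;
	default:
		if (QDF_IS_STATUS_ERROR(status))
			dp_peer_err("%pK: AST add failed", soc);
		break;
	}
}
#endif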
1931
1932 void dp_peer_free_ast_entry(struct dp_soc *soc,
1933 struct dp_ast_entry *ast_entry)
1934 {
1935 /*
1936 * NOTE: Ensure that call to this API is done
1937 * after soc->ast_lock is taken
1938 */
1939 dp_peer_debug("type: %d ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
1940 ast_entry->type, ast_entry->peer_id, ast_entry->vdev_id,
1941 QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
1942
1943 ast_entry->callback = NULL;
1944 ast_entry->cookie = NULL;
1945
1946 DP_STATS_INC(soc, ast.deleted, 1);
1947 dp_peer_ast_hash_remove(soc, ast_entry);
1948 dp_peer_ast_cleanup(soc, ast_entry);
1949 qdf_mem_free(ast_entry);
1950 soc->num_ast_entries--;
1951 }
1952
1953 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
1954 struct dp_ast_entry *ast_entry,
1955 struct dp_peer *peer)
1956 {
1957 if (!peer) {
1958 dp_info_rl("NULL peer");
1959 return;
1960 }
1961
1962 if (ast_entry->peer_id == HTT_INVALID_PEER) {
1963 dp_info_rl("Invalid peer id in AST entry mac addr:"QDF_MAC_ADDR_FMT" type:%d",
1964 QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
1965 ast_entry->type);
1966 return;
1967 }
1968 /*
1969 * NOTE: Ensure that call to this API is done
1970 * after soc->ast_lock is taken
1971 */
1972
1973 qdf_assert_always(ast_entry->peer_id == peer->peer_id);
1974 TAILQ_REMOVE(&peer->ast_entry_list, ast_entry, ase_list_elem);
1975
1976 if (ast_entry == peer->self_ast_entry)
1977 peer->self_ast_entry = NULL;
1978
1979 /*
1980 * release the reference only if it is mapped
1981 * to ast_table
1982 */
1983 if (ast_entry->is_mapped)
1984 soc->ast_table[ast_entry->ast_idx] = NULL;
1985
1986 ast_entry->peer_id = HTT_INVALID_PEER;
1987 }
1988
1989 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
1990 {
1991 struct dp_peer *peer = NULL;
1992
1993 if (soc->ast_offload_support)
1994 return;
1995
1996 if (!ast_entry) {
1997 dp_info_rl("NULL AST entry");
1998 return;
1999 }
2000
2001 if (ast_entry->delete_in_progress) {
2002 dp_info_rl("AST entry deletion in progress mac addr:"QDF_MAC_ADDR_FMT" type:%d",
2003 QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2004 ast_entry->type);
2005 return;
2006 }
2007
2008 dp_peer_debug("call by %ps: ID: %u vid: %u mac_addr: " QDF_MAC_ADDR_FMT,
2009 (void *)_RET_IP_, ast_entry->peer_id, ast_entry->vdev_id,
2010 QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw));
2011
2012 ast_entry->delete_in_progress = true;
2013
2014 /* In teardown, del ast is called after setting the logical delete
2015 * state; use __dp_peer_get_ref_by_id to get the reference
2016 * irrespective of state.
2017 */
2018 peer = __dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2019 DP_MOD_ID_AST);
2020
2021 dp_peer_ast_send_wds_del(soc, ast_entry, peer);
2022
2023 /* Remove SELF and STATIC entries in teardown itself */
2024 if (!ast_entry->next_hop)
2025 dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2026
2027 if (ast_entry->is_mapped)
2028 soc->ast_table[ast_entry->ast_idx] = NULL;
2029
2030 /* If peer map v2 is enabled, the ast entry is not freed here;
2031 * it is supposed to be freed in the unmap event (after we
2032 * receive delete confirmation from the target).
2033 *
2034 * If peer_id is invalid, we did not get the peer map event
2035 * for the peer; free the ast entry from here only in this case.
2036 */
2037 if (dp_peer_ast_free_in_unmap_supported(soc, ast_entry))
2038 goto end;
2039
2040 /* For a WDS secondary entry ast_entry->next_hop is set, so
2041 * unlinking has to be done explicitly here.
2042 * As this entry is not a mapped entry, no unmap notification
2043 * will come from FW; hence unlink it right here.
2044 */
2045
2046 if (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC)
2047 dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2048
2049 dp_peer_free_ast_entry(soc, ast_entry);
2050
2051 end:
2052 if (peer)
2053 dp_peer_unref_delete(peer, DP_MOD_ID_AST);
2054 }
2055
2056 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2057 struct dp_ast_entry *ast_entry, uint32_t flags)
2058 {
2059 int ret = -1;
2060 struct dp_peer *old_peer;
2061
2062 if (soc->ast_offload_support)
2063 return QDF_STATUS_E_INVAL;
2064
2065 dp_peer_debug("%pK: ast_entry->type: %d pdevid: %u vdevid: %u flags: 0x%x mac_addr: " QDF_MAC_ADDR_FMT " peer_mac: " QDF_MAC_ADDR_FMT "\n",
2066 soc, ast_entry->type, peer->vdev->pdev->pdev_id,
2067 peer->vdev->vdev_id, flags,
2068 QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2069 QDF_MAC_ADDR_REF(peer->mac_addr.raw));
2070
2071 /* Do not send an AST update in the cases below:
2072 * 1) AST entry delete has already been triggered
2073 * 2) Peer delete is already triggered
2074 * 3) We did not get the HTT map for the create event
2075 */
2076 if (ast_entry->delete_in_progress ||
2077 !dp_peer_state_cmp(peer, DP_PEER_STATE_ACTIVE) ||
2078 !ast_entry->is_mapped)
2079 return ret;
2080
2081 if ((ast_entry->type == CDP_TXRX_AST_TYPE_STATIC) ||
2082 (ast_entry->type == CDP_TXRX_AST_TYPE_SELF) ||
2083 (ast_entry->type == CDP_TXRX_AST_TYPE_STA_BSS) ||
2084 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
2085 return 0;
2086
2087 /*
2088 * Avoids flood of WMI update messages sent to FW for same peer.
2089 */
2090 if (qdf_unlikely(ast_entry->peer_id == peer->peer_id) &&
2091 (ast_entry->type == CDP_TXRX_AST_TYPE_WDS) &&
2092 (ast_entry->vdev_id == peer->vdev->vdev_id) &&
2093 (ast_entry->is_active))
2094 return 0;
2095
2096 old_peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
2097 DP_MOD_ID_AST);
2098 if (!old_peer)
2099 return 0;
2100
2101 TAILQ_REMOVE(&old_peer->ast_entry_list, ast_entry, ase_list_elem);
2102
2103 dp_peer_unref_delete(old_peer, DP_MOD_ID_AST);
2104
2105 ast_entry->peer_id = peer->peer_id;
2106 ast_entry->type = CDP_TXRX_AST_TYPE_WDS;
2107 ast_entry->pdev_id = peer->vdev->pdev->pdev_id;
2108 ast_entry->vdev_id = peer->vdev->vdev_id;
2109 ast_entry->is_active = TRUE;
2110 TAILQ_INSERT_TAIL(&peer->ast_entry_list, ast_entry, ase_list_elem);
2111
2112 ret = dp_update_wds_entry_wrapper(soc,
2113 peer,
2114 ast_entry->mac_addr.raw,
2115 flags);
2116
2117 return ret;
2118 }
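
/*
 * Roam sketch (illustrative only, not compiled; the caller name is
 * hypothetical): when source-port learning sees a known WDS client
 * behind a different base peer, dp_peer_update_ast() re-points the
 * entry and pushes a WMI update. A non-zero return means the update
 * was skipped (entry being deleted, peer not active, entry never
 * mapped) or that the WMI wrapper failed.
 */
#if 0
static void example_wds_roam(struct dp_soc *soc,
			     struct dp_peer *new_base_peer,
			     struct dp_ast_entry *ast_entry)
{
	if (dp_peer_update_ast(soc, new_base_peer, ast_entry, 0))
		dp_peer_debug("%pK: AST update not sent", soc);
}
#endif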
2119
2120 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2121 struct dp_ast_entry *ast_entry)
2122 {
2123 return ast_entry->pdev_id;
2124 }
2125
2126 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2127 struct dp_ast_entry *ast_entry)
2128 {
2129 return ast_entry->next_hop;
2130 }
2131
2132 void dp_peer_ast_set_type(struct dp_soc *soc,
2133 struct dp_ast_entry *ast_entry,
2134 enum cdp_txrx_ast_entry_type type)
2135 {
2136 ast_entry->type = type;
2137 }
2138
2139 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2140 struct dp_ast_entry *ast_entry,
2141 struct dp_peer *peer)
2142 {
2143 bool delete_in_fw = false;
2144
2145 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_TRACE,
2146 "%s: ast_entry->type: %d pdevid: %u vdev: %u mac_addr: "QDF_MAC_ADDR_FMT" next_hop: %u peer_id: %uM\n",
2147 __func__, ast_entry->type, ast_entry->pdev_id,
2148 ast_entry->vdev_id,
2149 QDF_MAC_ADDR_REF(ast_entry->mac_addr.raw),
2150 ast_entry->next_hop, ast_entry->peer_id);
2151
2152 /*
2153 * If the peer state is logical delete, the peer is about to be
2154 * torn down with a peer delete command to firmware,
2155 * which will clean up all the wds ast entries.
2156 * So, no need to send an explicit wds ast delete to firmware.
2157 */
2158 if (ast_entry->next_hop) {
2159 if (peer && dp_peer_state_cmp(peer,
2160 DP_PEER_STATE_LOGICAL_DELETE))
2161 delete_in_fw = false;
2162 else
2163 delete_in_fw = true;
2164
2165 dp_del_wds_entry_wrapper(soc,
2166 ast_entry->vdev_id,
2167 ast_entry->mac_addr.raw,
2168 ast_entry->type,
2169 delete_in_fw);
2170 }
2171 }
2172 #else
2173 void dp_peer_free_ast_entry(struct dp_soc *soc,
2174 struct dp_ast_entry *ast_entry)
2175 {
2176 }
2177
2178 void dp_peer_unlink_ast_entry(struct dp_soc *soc,
2179 struct dp_ast_entry *ast_entry,
2180 struct dp_peer *peer)
2181 {
2182 }
2183
2184 void dp_peer_ast_hash_remove(struct dp_soc *soc,
2185 struct dp_ast_entry *ase)
2186 {
2187 }
2188
2189 struct dp_ast_entry *dp_peer_ast_hash_find_by_vdevid(struct dp_soc *soc,
2190 uint8_t *ast_mac_addr,
2191 uint8_t vdev_id)
2192 {
2193 return NULL;
2194 }
2195
2196 QDF_STATUS dp_peer_add_ast(struct dp_soc *soc,
2197 struct dp_peer *peer,
2198 uint8_t *mac_addr,
2199 enum cdp_txrx_ast_entry_type type,
2200 uint32_t flags)
2201 {
2202 return QDF_STATUS_E_FAILURE;
2203 }
2204
2205 void dp_peer_del_ast(struct dp_soc *soc, struct dp_ast_entry *ast_entry)
2206 {
2207 }
2208
2209 int dp_peer_update_ast(struct dp_soc *soc, struct dp_peer *peer,
2210 struct dp_ast_entry *ast_entry, uint32_t flags)
2211 {
2212 return 1;
2213 }
2214
2215 struct dp_ast_entry *dp_peer_ast_hash_find_soc(struct dp_soc *soc,
2216 uint8_t *ast_mac_addr)
2217 {
2218 return NULL;
2219 }
2220
2221 struct dp_ast_entry *dp_peer_ast_hash_find_soc_by_type(
2222 struct dp_soc *soc,
2223 uint8_t *ast_mac_addr,
2224 enum cdp_txrx_ast_entry_type type)
2225 {
2226 return NULL;
2227 }
2228
2229 static inline
2230 QDF_STATUS dp_peer_host_add_map_ast(struct dp_soc *soc, uint16_t peer_id,
2231 uint8_t *mac_addr, uint16_t hw_peer_id,
2232 uint8_t vdev_id, uint16_t ast_hash,
2233 uint8_t is_wds)
2234 {
2235 return QDF_STATUS_SUCCESS;
2236 }
2237
2238 struct dp_ast_entry *dp_peer_ast_hash_find_by_pdevid(struct dp_soc *soc,
2239 uint8_t *ast_mac_addr,
2240 uint8_t pdev_id)
2241 {
2242 return NULL;
2243 }
2244
2245 QDF_STATUS dp_peer_ast_hash_attach(struct dp_soc *soc)
2246 {
2247 return QDF_STATUS_SUCCESS;
2248 }
2249
2250 static inline QDF_STATUS dp_peer_map_ast(struct dp_soc *soc,
2251 struct dp_peer *peer,
2252 uint8_t *mac_addr,
2253 uint16_t hw_peer_id,
2254 uint8_t vdev_id,
2255 uint16_t ast_hash,
2256 uint8_t is_wds)
2257 {
2258 return QDF_STATUS_SUCCESS;
2259 }
2260
2261 void dp_peer_ast_hash_detach(struct dp_soc *soc)
2262 {
2263 }
2264
2265 void dp_peer_ast_set_type(struct dp_soc *soc,
2266 struct dp_ast_entry *ast_entry,
2267 enum cdp_txrx_ast_entry_type type)
2268 {
2269 }
2270
2271 uint8_t dp_peer_ast_get_pdev_id(struct dp_soc *soc,
2272 struct dp_ast_entry *ast_entry)
2273 {
2274 return 0xff;
2275 }
2276
2277 uint8_t dp_peer_ast_get_next_hop(struct dp_soc *soc,
2278 struct dp_ast_entry *ast_entry)
2279 {
2280 return 0xff;
2281 }
2282
2283 void dp_peer_ast_send_wds_del(struct dp_soc *soc,
2284 struct dp_ast_entry *ast_entry,
2285 struct dp_peer *peer)
2286 {
2287 }
2288
2289 static inline
2290 void dp_peer_unmap_ipa_evt(struct dp_soc *soc, uint16_t peer_id,
2291 uint8_t vdev_id, uint8_t *mac_addr)
2292 {
2293 }
2294 #endif
2295
2296 #ifdef WLAN_FEATURE_MULTI_AST_DEL
2297 void dp_peer_ast_send_multi_wds_del(
2298 struct dp_soc *soc, uint8_t vdev_id,
2299 struct peer_del_multi_wds_entries *wds_list)
2300 {
2301 struct cdp_soc_t *cdp_soc = &soc->cdp_soc;
2302
2303 if (cdp_soc && cdp_soc->ol_ops &&
2304 cdp_soc->ol_ops->peer_del_multi_wds_entry)
2305 cdp_soc->ol_ops->peer_del_multi_wds_entry(soc->ctrl_psoc,
2306 vdev_id, wds_list);
2307 }
2308 #endif
2309
2310 #ifdef FEATURE_WDS
2311 /**
2312 * dp_peer_ast_free_wds_entries() - Free wds ast entries associated with peer
2313 * @soc: soc handle
2314 * @peer: peer handle
2315 *
2316 * Free all the wds ast entries associated with peer
2317 *
2318 * Return: Number of wds ast entries freed
2319 */
2320 static uint32_t dp_peer_ast_free_wds_entries(struct dp_soc *soc,
2321 struct dp_peer *peer)
2322 {
2323 TAILQ_HEAD(, dp_ast_entry) ast_local_list = {0};
2324 struct dp_ast_entry *ast_entry, *temp_ast_entry;
2325 uint32_t num_ast = 0;
2326
2327 TAILQ_INIT(&ast_local_list);
2328 qdf_spin_lock_bh(&soc->ast_lock);
2329
2330 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2331 if (ast_entry->next_hop)
2332 num_ast++;
2333
2334 if (ast_entry->is_mapped)
2335 soc->ast_table[ast_entry->ast_idx] = NULL;
2336
2337 dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2338 DP_STATS_INC(soc, ast.deleted, 1);
2339 dp_peer_ast_hash_remove(soc, ast_entry);
2340 TAILQ_INSERT_TAIL(&ast_local_list, ast_entry,
2341 ase_list_elem);
2342 soc->num_ast_entries--;
2343 }
2344
2345 qdf_spin_unlock_bh(&soc->ast_lock);
2346
2347 TAILQ_FOREACH_SAFE(ast_entry, &ast_local_list, ase_list_elem,
2348 temp_ast_entry) {
2349 if (ast_entry->callback)
2350 ast_entry->callback(soc->ctrl_psoc,
2351 dp_soc_to_cdp_soc(soc),
2352 ast_entry->cookie,
2353 CDP_TXRX_AST_DELETED);
2354
2355 qdf_mem_free(ast_entry);
2356 }
2357
2358 return num_ast;
2359 }
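
/*
 * Design note on the teardown above: entries are unlinked and removed
 * from the hash under soc->ast_lock but only collected on a local
 * list; the registered free callbacks run after the lock is dropped.
 * Running a callback under ast_lock could deadlock, since callbacks
 * such as dp_peer_free_hmwds_cb() re-enter the AST API and
 * dp_peer_add_ast() takes ast_lock itself.
 */
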
2360 /**
2361 * dp_peer_clean_wds_entries() - Clean wds ast entries and compare
2362 * @soc: soc handle
2363 * @peer: peer handle
2364 * @free_wds_count: number of wds entries freed by FW with peer delete
2365 *
2366 * Free all the wds ast entries associated with peer and compare with
2367 * the value received from firmware
2368 *
2369 * Return: None
2370 */
2371 static void
2372 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2373 uint32_t free_wds_count)
2374 {
2375 uint32_t wds_deleted = 0;
2376 bool ast_ind_disable;
2377
2378 if (soc->ast_offload_support && !soc->host_ast_db_enable)
2379 return;
2380
2381 ast_ind_disable = wlan_cfg_get_ast_indication_disable
2382 (soc->wlan_cfg_ctx);
2383
2384 wds_deleted = dp_peer_ast_free_wds_entries(soc, peer);
2385 if ((DP_PEER_WDS_COUNT_INVALID != free_wds_count) &&
2386 (free_wds_count != wds_deleted) && !ast_ind_disable) {
2387 DP_STATS_INC(soc, ast.ast_mismatch, 1);
2388 dp_alert("For peer %pK (mac: "QDF_MAC_ADDR_FMT") number of wds entries deleted by fw = %d during peer delete is not same as the number deleted by host = %d",
2389 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw), free_wds_count,
2390 wds_deleted);
2391 }
2392 }
2393
2394 #else
2395 static void
2396 dp_peer_clean_wds_entries(struct dp_soc *soc, struct dp_peer *peer,
2397 uint32_t free_wds_count)
2398 {
2399 struct dp_ast_entry *ast_entry, *temp_ast_entry;
2400
2401 qdf_spin_lock_bh(&soc->ast_lock);
2402
2403 DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry) {
2404 dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2405
2406 if (ast_entry->is_mapped)
2407 soc->ast_table[ast_entry->ast_idx] = NULL;
2408
2409 dp_peer_free_ast_entry(soc, ast_entry);
2410 }
2411
2412 peer->self_ast_entry = NULL;
2413 qdf_spin_unlock_bh(&soc->ast_lock);
2414 }
2415 #endif
2416
2417 /**
2418 * dp_peer_ast_free_entry_by_mac() - find ast entry by MAC address and delete
2419 * @soc: soc handle
2420 * @peer: peer handle
2421 * @vdev_id: vdev_id
2422 * @mac_addr: mac address of the AST entry to search for and delete
2423 *
2424 * Find the ast entry from the peer list using the mac address and free
2425 * the entry.
2426 *
2427 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_NOENT
2428 */
2429 static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
2430 struct dp_peer *peer,
2431 uint8_t vdev_id,
2432 uint8_t *mac_addr)
2433 {
2434 struct dp_ast_entry *ast_entry;
2435 void *cookie = NULL;
2436 txrx_ast_free_cb cb = NULL;
2437
2438 /*
2439 * release the reference only if it is mapped
2440 * to ast_table
2441 */
2442
2443 qdf_spin_lock_bh(&soc->ast_lock);
2444
2445 ast_entry = dp_peer_ast_hash_find_by_vdevid(soc, mac_addr, vdev_id);
2446 if (!ast_entry) {
2447 qdf_spin_unlock_bh(&soc->ast_lock);
2448 return QDF_STATUS_E_NOENT;
2449 } else if (ast_entry->is_mapped) {
2450 soc->ast_table[ast_entry->ast_idx] = NULL;
2451 }
2452
2453 cb = ast_entry->callback;
2454 cookie = ast_entry->cookie;
2455
2457 dp_peer_unlink_ast_entry(soc, ast_entry, peer);
2458
2459 dp_peer_free_ast_entry(soc, ast_entry);
2460
2461 qdf_spin_unlock_bh(&soc->ast_lock);
2462
2463 if (cb) {
2464 cb(soc->ctrl_psoc,
2465 dp_soc_to_cdp_soc(soc),
2466 cookie,
2467 CDP_TXRX_AST_DELETED);
2468 }
2469
2470 return QDF_STATUS_SUCCESS;
2471 }
2472
2473 void dp_peer_find_hash_erase(struct dp_soc *soc)
2474 {
2475 int i, mod_id;
2476
2477 /*
2478 * Not really necessary to take peer_ref_mutex lock - by this point,
2479 * it's known that the soc is no longer in use.
2480 */
2481 for (i = 0; i <= soc->peer_hash.mask; i++) {
2482 if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
2483 struct dp_peer *peer, *peer_next;
2484
2485 /*
2486 * TAILQ_FOREACH_SAFE must be used here to avoid any
2487 * memory access violation after peer is freed
2488 */
2489 TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
2490 hash_list_elem, peer_next) {
2491 /*
2492 * Don't remove the peer from the hash table -
2493 * that would modify the list we are currently
2494 * traversing, and it's not necessary anyway.
2495 */
2496 /*
2497 * Artificially adjust the peer's ref count to
2498 * 1, so it will get deleted by
2499 * dp_peer_unref_delete.
2500 */
2501 /* set to zero */
2502 qdf_atomic_init(&peer->ref_cnt);
2503 for (mod_id = 0; mod_id < DP_MOD_ID_MAX; mod_id++)
2504 qdf_atomic_init(&peer->mod_refs[mod_id]);
2505 /* incr to one */
2506 qdf_atomic_inc(&peer->ref_cnt);
2507 qdf_atomic_inc(&peer->mod_refs
2508 [DP_MOD_ID_CONFIG]);
2509 dp_peer_unref_delete(peer,
2510 DP_MOD_ID_CONFIG);
2511 }
2512 }
2513 }
2514 }
2515
2516 void dp_peer_ast_table_detach(struct dp_soc *soc)
2517 {
2518 if (soc->ast_table) {
2519 qdf_mem_free(soc->ast_table);
2520 soc->ast_table = NULL;
2521 }
2522 }
2523
2524 void dp_peer_find_map_detach(struct dp_soc *soc)
2525 {
2526 struct dp_peer *peer = NULL;
2527 uint32_t i = 0;
2528
2529 if (soc->peer_id_to_obj_map) {
2530 for (i = 0; i < soc->max_peer_id; i++) {
2531 peer = soc->peer_id_to_obj_map[i];
2532 if (peer)
2533 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2534 }
2535 qdf_mem_free(soc->peer_id_to_obj_map);
2536 soc->peer_id_to_obj_map = NULL;
2537 qdf_spinlock_destroy(&soc->peer_map_lock);
2538 }
2539 }
2540
2541 #ifndef AST_OFFLOAD_ENABLE
2542 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2543 {
2544 QDF_STATUS status;
2545
2546 status = dp_peer_find_map_attach(soc);
2547 if (!QDF_IS_STATUS_SUCCESS(status))
2548 return status;
2549
2550 status = dp_peer_find_hash_attach(soc);
2551 if (!QDF_IS_STATUS_SUCCESS(status))
2552 goto map_detach;
2553
2554 status = dp_peer_ast_table_attach(soc);
2555 if (!QDF_IS_STATUS_SUCCESS(status))
2556 goto hash_detach;
2557
2558 status = dp_peer_ast_hash_attach(soc);
2559 if (!QDF_IS_STATUS_SUCCESS(status))
2560 goto ast_table_detach;
2561
2562 status = dp_peer_mec_hash_attach(soc);
2563 if (QDF_IS_STATUS_SUCCESS(status)) {
2564 dp_soc_wds_attach(soc);
2565 return status;
2566 }
2567
2568 dp_peer_ast_hash_detach(soc);
2569 ast_table_detach:
2570 dp_peer_ast_table_detach(soc);
2571 hash_detach:
2572 dp_peer_find_hash_detach(soc);
2573 map_detach:
2574 dp_peer_find_map_detach(soc);
2575
2576 return status;
2577 }
2578 #else
2579 QDF_STATUS dp_peer_find_attach(struct dp_soc *soc)
2580 {
2581 QDF_STATUS status;
2582
2583 status = dp_peer_find_map_attach(soc);
2584 if (!QDF_IS_STATUS_SUCCESS(status))
2585 return status;
2586
2587 status = dp_peer_find_hash_attach(soc);
2588 if (!QDF_IS_STATUS_SUCCESS(status))
2589 goto map_detach;
2590
2591 return status;
2592 map_detach:
2593 dp_peer_find_map_detach(soc);
2594
2595 return status;
2596 }
2597 #endif
2598
2599 #ifdef REO_SHARED_QREF_TABLE_EN
2600 void dp_peer_rx_reo_shared_qaddr_delete(struct dp_soc *soc,
2601 struct dp_peer *peer)
2602 {
2603 uint8_t tid;
2604 uint16_t peer_id;
2605 uint32_t max_list_size;
2606
2607 max_list_size = soc->wlan_cfg_ctx->qref_control_size;
2608
2609 peer_id = peer->peer_id;
2610
2611 if (peer_id > soc->max_peer_id)
2612 return;
2613 if (IS_MLO_DP_LINK_PEER(peer))
2614 return;
2615
2616 if (max_list_size) {
2617 unsigned long curr_ts = qdf_get_system_timestamp();
2618 struct dp_peer *primary_peer = peer;
2619 uint16_t chip_id = 0xFFFF;
2620 uint32_t qref_index;
2621
2622 qref_index = soc->shared_qaddr_del_idx;
2623
2624 soc->list_shared_qaddr_del[qref_index].peer_id =
2625 primary_peer->peer_id;
2626 soc->list_shared_qaddr_del[qref_index].ts_qaddr_del = curr_ts;
2627 soc->list_shared_qaddr_del[qref_index].chip_id = chip_id;
2628 soc->shared_qaddr_del_idx++;
2629
2630 if (soc->shared_qaddr_del_idx == max_list_size)
2631 soc->shared_qaddr_del_idx = 0;
2632 }
2633
2634 if (hal_reo_shared_qaddr_is_enable(soc->hal_soc)) {
2635 for (tid = 0; tid < DP_MAX_TIDS; tid++) {
2636 hal_reo_shared_qaddr_write(soc->hal_soc,
2637 peer_id, tid, 0);
2638 }
2639 }
2640 }
2641 #endif
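
/*
 * Worked example (illustrative): list_shared_qaddr_del acts as a
 * circular deletion-history buffer. With qref_control_size == 4,
 * successive deletes record at indices 0, 1, 2, 3 and the fifth
 * delete wraps to index 0, overwriting the oldest snapshot
 * (shared_qaddr_del_idx is reset once it reaches max_list_size).
 */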
2642
2643 /**
2644 * dp_peer_find_add_id() - map peer_id with peer
2645 * @soc: soc handle
2646 * @peer_mac_addr: peer mac address
2647 * @peer_id: peer id to be mapped
2648 * @hw_peer_id: HW ast index
2649 * @vdev_id: vdev_id
2650 * @peer_type: peer type (link or MLD)
2651 *
2652 * Return: peer on success,
2653 * NULL on failure
2654 */
2655 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
2656 uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
2657 uint8_t vdev_id, enum cdp_peer_type peer_type)
2658 {
2659 struct dp_peer *peer;
2660 struct cdp_peer_info peer_info = { 0 };
2661
2662 QDF_ASSERT(peer_id <= soc->max_peer_id);
2663 /* check if there's already a peer object with this MAC address */
2664 DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr,
2665 false, peer_type);
2666 peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
2667 dp_peer_debug("%pK: peer %pK ID %d vid %d mac " QDF_MAC_ADDR_FMT,
2668 soc, peer, peer_id, vdev_id,
2669 QDF_MAC_ADDR_REF(peer_mac_addr));
2670
2671 if (peer) {
2672 /* peer's ref count was already incremented by
2673 * peer_find_hash_find
2674 */
2675 dp_peer_info("%pK: ref_cnt: %d", soc,
2676 qdf_atomic_read(&peer->ref_cnt));
2677
2678 /*
2679 * If the peer is in logical delete state, CP triggered the delete
2680 * before the map was received; ignore this event.
2681 */
2682 if (dp_peer_state_cmp(peer, DP_PEER_STATE_LOGICAL_DELETE)) {
2683 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2684 dp_alert("Peer %pK["QDF_MAC_ADDR_FMT"] logical delete state vid %d",
2685 peer, QDF_MAC_ADDR_REF(peer_mac_addr),
2686 vdev_id);
2687 return NULL;
2688 }
2689
2690 if (peer->peer_id == HTT_INVALID_PEER) {
2691 if (!IS_MLO_DP_MLD_PEER(peer))
2692 dp_monitor_peer_tid_peer_id_update(soc, peer,
2693 peer_id);
2694 } else {
2695 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
2696 QDF_ASSERT(0);
2697 return NULL;
2698 }
2699 dp_peer_find_id_to_obj_add(soc, peer, peer_id);
2700 if (soc->arch_ops.dp_partner_chips_map)
2701 soc->arch_ops.dp_partner_chips_map(soc, peer, peer_id);
2702
2703 dp_peer_update_state(soc, peer, DP_PEER_STATE_ACTIVE);
2704 return peer;
2705 }
2706
2707 return NULL;
2708 }
2709
2710 #ifdef WLAN_FEATURE_11BE_MLO
2711 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2712 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
2713 {
2714 return ((peer_id & soc->peer_id_mask) | (1 << soc->peer_id_shift));
2715 }
2716 #else
2717 uint16_t dp_gen_ml_peer_id(struct dp_soc *soc, uint16_t peer_id)
2718 {
2719 return (peer_id | (1 << HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S));
2720 }
2721 #endif
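
/*
 * Worked example (illustrative; the shift value below is an assumed
 * figure, not taken from this file): if
 * HTT_RX_PEER_META_DATA_V1_ML_PEER_VALID_S were 13, a link peer_id of
 * 0x0005 would map to
 *
 *   ml_peer_id = 0x0005 | (1 << 13) = 0x2005
 *
 * i.e. the ML-valid bit is ORed onto the FW-assigned id. The
 * reduced-width variant first masks the id with soc->peer_id_mask and
 * then sets the valid bit at soc->peer_id_shift.
 */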
2722
2723 QDF_STATUS
2724 dp_rx_mlo_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2725 uint8_t *peer_mac_addr,
2726 struct dp_mlo_flow_override_info *mlo_flow_info,
2727 struct dp_mlo_link_info *mlo_link_info)
2728 {
2729 struct dp_peer *peer = NULL;
2730 uint16_t hw_peer_id = mlo_flow_info[0].ast_idx;
2731 uint16_t ast_hash = mlo_flow_info[0].cache_set_num;
2732 uint8_t vdev_id = 0;
2733 uint8_t is_wds = 0;
2734 int i;
2735 uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
2736 enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2737 QDF_STATUS err = QDF_STATUS_SUCCESS;
2738 struct dp_soc *primary_soc = NULL;
2739
2740 dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_MAP,
2741 NULL, peer_mac_addr,
2742 1, peer_id, ml_peer_id, 0,
2743 vdev_id);
2744
2745 dp_info("mlo_peer_map_event (soc:%pK): peer_id %d ml_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT,
2746 soc, peer_id, ml_peer_id,
2747 QDF_MAC_ADDR_REF(peer_mac_addr));
2748
2749 DP_STATS_INC(soc, t2h_msg_stats.ml_peer_map, 1);
2750 /* Get corresponding vdev ID for the peer based
2751 * on chip ID obtained from mlo peer_map event
2752 */
2753 for (i = 0; i < DP_MAX_MLO_LINKS; i++) {
2754 if (mlo_link_info[i].peer_chip_id == dp_get_chip_id(soc)) {
2755 vdev_id = mlo_link_info[i].vdev_id;
2756 break;
2757 }
2758 }
2759
2760 peer = dp_peer_find_add_id(soc, peer_mac_addr, ml_peer_id,
2761 hw_peer_id, vdev_id, CDP_MLD_PEER_TYPE);
2762 if (peer) {
2763 if (wlan_op_mode_sta == peer->vdev->opmode &&
2764 qdf_mem_cmp(peer->mac_addr.raw,
2765 peer->vdev->mld_mac_addr.raw,
2766 QDF_MAC_ADDR_SIZE) != 0) {
2767 dp_peer_info("%pK: STA vdev bss_peer!!!!", soc);
2768 peer->bss_peer = 1;
2769 if (peer->txrx_peer)
2770 peer->txrx_peer->bss_peer = 1;
2771 }
2772
2773 if (peer->vdev->opmode == wlan_op_mode_sta) {
2774 peer->vdev->bss_ast_hash = ast_hash;
2775 peer->vdev->bss_ast_idx = hw_peer_id;
2776 }
2777
2778 /* Add an ast entry in case the self ast entry was
2779 * deleted due to a DP CP sync issue.
2780 *
2781 * self_ast_entry is modified in the peer create
2782 * and peer unmap paths, which cannot run in
2783 * parallel with peer map, so no lock is needed before
2784 * referring to it.
2785 */
2786 if (!peer->self_ast_entry) {
2787 dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2788 QDF_MAC_ADDR_REF(peer_mac_addr));
2789 dp_peer_add_ast(soc, peer,
2790 peer_mac_addr,
2791 type, 0);
2792 }
2793 /* If peer setup and hence rx_tid setup got called
2794 * before htt peer map then Qref write to LUT did not
2795 * happen in rx_tid setup as peer_id was invalid.
2796 * So defer Qref write to peer map handler. Check if
2797 * rx_tid qdesc for tid 0 is already setup and perform
2798 * qref write to LUT for Tid 0 and 16.
2799 *
2800 * Peer map could be obtained on assoc link, hence
2801 * change to primary link's soc.
2802 */
2803 primary_soc = peer->vdev->pdev->soc;
2804 if (hal_reo_shared_qaddr_is_enable(primary_soc->hal_soc) &&
2805 peer->rx_tid[0].hw_qdesc_vaddr_unaligned) {
2806 hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2807 ml_peer_id,
2808 0,
2809 peer->rx_tid[0].hw_qdesc_paddr);
2810 hal_reo_shared_qaddr_write(primary_soc->hal_soc,
2811 ml_peer_id,
2812 DP_NON_QOS_TID,
2813 peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2814 }
2815 }
2816
2817 if (!primary_soc)
2818 primary_soc = soc;
2819
2820 err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2821 vdev_id, ast_hash, is_wds);
2822
2823 /*
2824 * If AST offload and host AST DB is enabled, populate AST entries on
2825 * host based on mlo peer map event from FW
2826 */
2827 if (peer && soc->ast_offload_support && soc->host_ast_db_enable) {
2828 dp_peer_host_add_map_ast(primary_soc, ml_peer_id, peer_mac_addr,
2829 hw_peer_id, vdev_id,
2830 ast_hash, is_wds);
2831 }
2832
2833 return err;
2834 }
2835 #endif
2836
2837 #ifdef DP_RX_UDP_OVER_PEER_ROAM
2838 void dp_rx_reset_roaming_peer(struct dp_soc *soc, uint8_t vdev_id,
2839 uint8_t *peer_mac_addr)
2840 {
2841 struct dp_vdev *vdev = NULL;
2842
2843 vdev = dp_vdev_get_ref_by_id(soc, vdev_id, DP_MOD_ID_HTT);
2844 if (vdev) {
2845 if (qdf_mem_cmp(vdev->roaming_peer_mac.raw, peer_mac_addr,
2846 QDF_MAC_ADDR_SIZE) == 0) {
2847 vdev->roaming_peer_status =
2848 WLAN_ROAM_PEER_AUTH_STATUS_NONE;
2849 qdf_mem_zero(vdev->roaming_peer_mac.raw,
2850 QDF_MAC_ADDR_SIZE);
2851 }
2852 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT);
2853 }
2854 }
2855 #endif
2856
2857 #ifdef WLAN_SUPPORT_PPEDS
2858 static void
2859 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
2860 bool peer_map)
2861 {
2862 if (soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping)
2863 soc->arch_ops.dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2864 peer_map);
2865 }
2866 #else
2867 static void
2868 dp_tx_ppeds_cfg_astidx_cache_mapping(struct dp_soc *soc, struct dp_vdev *vdev,
2869 bool peer_map)
2870 {
2871 }
2872 #endif
2873
2874 QDF_STATUS
2875 dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
2876 uint16_t hw_peer_id, uint8_t vdev_id,
2877 uint8_t *peer_mac_addr, uint16_t ast_hash,
2878 uint8_t is_wds)
2879 {
2880 struct dp_peer *peer = NULL;
2881 struct dp_vdev *vdev = NULL;
2882 enum cdp_txrx_ast_entry_type type = CDP_TXRX_AST_TYPE_STATIC;
2883 QDF_STATUS err = QDF_STATUS_SUCCESS;
2884
2885 dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_MAP,
2886 NULL, peer_mac_addr, 1, peer_id,
2887 0, 0, vdev_id);
2888 dp_info("peer_map_event (soc:%pK): peer_id %d, hw_peer_id %d, peer_mac "QDF_MAC_ADDR_FMT", vdev_id %d",
2889 soc, peer_id, hw_peer_id,
2890 QDF_MAC_ADDR_REF(peer_mac_addr), vdev_id);
2891 DP_STATS_INC(soc, t2h_msg_stats.peer_map, 1);
2892
2893 /* For a peer map event for a WDS ast entry, get the peer
2894 * from the obj map.
2895 */
2896 if (is_wds) {
2897 if (!soc->ast_offload_support) {
2898 peer = dp_peer_get_ref_by_id(soc, peer_id,
2899 DP_MOD_ID_HTT);
2900
2901 err = dp_peer_map_ast(soc, peer, peer_mac_addr,
2902 hw_peer_id,
2903 vdev_id, ast_hash, is_wds);
2904 if (peer)
2905 dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
2906 }
2907 } else {
2908 /*
2909 * It's the responsibility of the CP and FW to ensure
2910 * that peer is created successfully. Ideally DP should
2911 * not hit the below condition for directly associated
2912 * peers.
2913 */
2914 if ((!soc->ast_offload_support) && ((hw_peer_id < 0) ||
2915 (hw_peer_id >=
2916 wlan_cfg_get_max_ast_idx(soc->wlan_cfg_ctx)))) {
2917 dp_peer_err("%pK: invalid hw_peer_id: %d", soc, hw_peer_id);
2918 qdf_assert_always(0);
2919 }
2920
2921 peer = dp_peer_find_add_id(soc, peer_mac_addr, peer_id,
2922 hw_peer_id, vdev_id,
2923 CDP_LINK_PEER_TYPE);
2924
2925 if (peer) {
2926 bool peer_map = true;
2927
2928 /* Updating ast_hash and ast_idx in peer level */
2929 peer->ast_hash = ast_hash;
2930 peer->ast_idx = hw_peer_id;
2931 vdev = peer->vdev;
2932 /* Only check for STA Vdev and peer is not for TDLS */
2933 if (wlan_op_mode_sta == vdev->opmode &&
2934 !peer->is_tdls_peer) {
2935 if (qdf_mem_cmp(peer->mac_addr.raw,
2936 vdev->mac_addr.raw,
2937 QDF_MAC_ADDR_SIZE) != 0) {
2938 dp_info("%pK: STA vdev bss_peer", soc);
2939 peer->bss_peer = 1;
2940 if (peer->txrx_peer)
2941 peer->txrx_peer->bss_peer = 1;
2942 }
2943
2944 dp_info("bss ast_hash 0x%x, ast_index 0x%x",
2945 ast_hash, hw_peer_id);
2946 vdev->bss_ast_hash = ast_hash;
2947 vdev->bss_ast_idx = hw_peer_id;
2948
2949 dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev,
2950 peer_map);
2951 }
2952
2953 /* Add an ast entry in case the self ast entry was
2954 * deleted due to a DP CP sync issue.
2955 *
2956 * self_ast_entry is modified in the peer create
2957 * and peer unmap paths, which cannot run in
2958 * parallel with peer map, so no lock is needed before
2959 * referring to it.
2960 */
2961 if (!soc->ast_offload_support &&
2962 !peer->self_ast_entry) {
2963 dp_info("Add self ast from map "QDF_MAC_ADDR_FMT,
2964 QDF_MAC_ADDR_REF(peer_mac_addr));
2965 dp_peer_add_ast(soc, peer,
2966 peer_mac_addr,
2967 type, 0);
2968 }
2969
2970 /* If peer setup and hence rx_tid setup got called
2971 * before htt peer map then Qref write to LUT did
2972 * not happen in rx_tid setup as peer_id was invalid.
2973 * So defer Qref write to peer map handler. Check if
2974 * rx_tid qdesc for tid 0 is already set up and perform qref
2975 * write to LUT for Tid 0 and 16.
2976 */
2977 if (hal_reo_shared_qaddr_is_enable(soc->hal_soc) &&
2978 peer->rx_tid[0].hw_qdesc_vaddr_unaligned &&
2979 !IS_MLO_DP_LINK_PEER(peer)) {
2980 add_entry_write_list(soc, peer, 0);
2981 hal_reo_shared_qaddr_write(soc->hal_soc,
2982 peer_id,
2983 0,
2984 peer->rx_tid[0].hw_qdesc_paddr);
2985 add_entry_write_list(soc, peer, DP_NON_QOS_TID);
2986 hal_reo_shared_qaddr_write(soc->hal_soc,
2987 peer_id,
2988 DP_NON_QOS_TID,
2989 peer->rx_tid[DP_NON_QOS_TID].hw_qdesc_paddr);
2990 }
2991 }
2992
2993 err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
2994 vdev_id, ast_hash, is_wds);
2995 }
2996
2997 dp_rx_reset_roaming_peer(soc, vdev_id, peer_mac_addr);
2998
2999 /*
3000 * If AST offload and host AST DB is enabled, populate AST entries on
3001 * host based on peer map event from FW
3002 */
3003 if (soc->ast_offload_support && soc->host_ast_db_enable) {
3004 dp_peer_host_add_map_ast(soc, peer_id, peer_mac_addr,
3005 hw_peer_id, vdev_id,
3006 ast_hash, is_wds);
3007 }
3008
3009 return err;
3010 }
3011
3012 void
3013 dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
3014 uint8_t vdev_id, uint8_t *mac_addr,
3015 uint8_t is_wds, uint32_t free_wds_count)
3016 {
3017 struct dp_peer *peer;
3018 struct dp_vdev *vdev = NULL;
3019
3020 DP_STATS_INC(soc, t2h_msg_stats.peer_unmap, 1);
3021
3022 /*
3023 * If FW AST offload is enabled and host AST DB is enabled,
3024 * the AST entries are created during peer map from FW.
3025 */
3026 if (soc->ast_offload_support && is_wds) {
3027 if (!soc->host_ast_db_enable)
3028 return;
3029 }
3030
3031 peer = __dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3032
3033 /*
3034 * Currently peer IDs are assigned for vdevs as well as peers.
3035 * If the peer ID is for a vdev, then the peer pointer stored
3036 * in peer_id_to_obj_map will be NULL.
3037 */
3038 if (!peer) {
3039 dp_err("Received unmap event for invalid peer_id %u",
3040 peer_id);
3041 DP_STATS_INC(soc, t2h_msg_stats.invalid_peer_unmap, 1);
3042 return;
3043 }
3044
3045 vdev = peer->vdev;
3046
3047 if (peer->txrx_peer) {
3048 struct cdp_txrx_peer_params_update params = {0};
3049
3050 params.vdev_id = vdev->vdev_id;
3051 params.peer_mac = peer->mac_addr.raw;
3052 params.chip_id = dp_get_chip_id(soc);
3053 params.pdev_id = vdev->pdev->pdev_id;
3054
3055 dp_wdi_event_handler(WDI_EVENT_PEER_UNMAP, soc,
3056 (void *)&params, peer_id,
3057 WDI_NO_VAL, vdev->pdev->pdev_id);
3058 }
3059
3060 /*
3061 * In scenario where assoc peer soc id is different from
3062 * primary soc id, reset the soc to point to primary psoc.
3063 * Since map is received on primary soc, the unmap should
3064 * also delete ast on primary soc.
3065 */
3066 soc = peer->vdev->pdev->soc;
3067
3068 /* If V2 peer map messages are enabled, the AST entry has to be
3069 * freed here.
3070 */
3071 if (is_wds) {
3072 if (!dp_peer_ast_free_entry_by_mac(soc, peer, vdev_id,
3073 mac_addr)) {
3074 dp_peer_unmap_ipa_evt(soc, peer_id, vdev_id, mac_addr);
3075 dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3076 return;
3077 }
3078
3079 dp_alert("AST entry not found with peer %pK peer_id %u peer_mac "QDF_MAC_ADDR_FMT" mac_addr "QDF_MAC_ADDR_FMT" vdev_id %u next_hop %u",
3080 peer, peer->peer_id,
3081 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3082 QDF_MAC_ADDR_REF(mac_addr), vdev_id,
3083 is_wds);
3084
3085 dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3086 return;
3087 }
3088
3089 dp_peer_clean_wds_entries(soc, peer, free_wds_count);
3090
3091 dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_PEER_UNMAP,
3092 peer, mac_addr, 0, peer_id,
3093 0, 0, vdev_id);
3094 dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
3095 soc, peer_id, peer);
3096
3097 /* Clear entries in Qref LUT */
3098 /* TODO: Check if this is to be called from
3099 * dp_peer_delete for MLO case if there is race between
3100 * new peer id assignment and still not having received
3101 * peer unmap for MLD peer with same peer id.
3102 */
3103 dp_peer_rx_reo_shared_qaddr_delete(soc, peer);
3104
3105 vdev = peer->vdev;
3106
3107 /* only if peer is in STA mode and not tdls peer */
3108 if (wlan_op_mode_sta == vdev->opmode && !peer->is_tdls_peer) {
3109 bool peer_map = false;
3110
3111 dp_tx_ppeds_cfg_astidx_cache_mapping(soc, vdev, peer_map);
3112 }
3113
3114 dp_peer_find_id_to_obj_remove(soc, peer_id);
3115
3116 if (soc->arch_ops.dp_partner_chips_unmap)
3117 soc->arch_ops.dp_partner_chips_unmap(soc, peer_id);
3118
3119 peer->peer_id = HTT_INVALID_PEER;
3120
3121 /*
3122 * Reset ast flow mapping table
3123 */
3124 if (!soc->ast_offload_support)
3125 dp_peer_reset_flowq_map(peer);
3126
3127 if (soc->cdp_soc.ol_ops->peer_unmap_event) {
3128 soc->cdp_soc.ol_ops->peer_unmap_event(soc->ctrl_psoc,
3129 peer_id, vdev_id, mac_addr);
3130 }
3131
3132 dp_update_vdev_stats_on_peer_unmap(vdev, peer);
3133
3134 dp_peer_update_state(soc, peer, DP_PEER_STATE_INACTIVE);
3135 dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3136 /*
3137 * Remove a reference to the peer.
3138 * If there are no more references, delete the peer object.
3139 */
3140 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3141 }
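
/*
 * Lifecycle sketch (illustrative summary of the handlers above): for
 * a directly associated peer the events pair up as
 *
 *   HTT peer map   -> dp_rx_peer_map_handler():
 *                     dp_peer_find_add_id() binds peer_id to the
 *                     dp_peer, state ACTIVE, AST/Qref bookkeeping
 *   HTT peer unmap -> dp_rx_peer_unmap_handler():
 *                     wds cleanup, Qref LUT cleared, id mapping
 *                     removed, state INACTIVE, CONFIG ref dropped
 *
 * An unmap with is_wds set only frees the matching AST entry and
 * leaves the peer object itself mapped.
 */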
3142
3143 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
3144 enum dp_bands dp_freq_to_band(qdf_freq_t freq)
3145 {
3146 if (REG_IS_24GHZ_CH_FREQ(freq))
3147 return DP_BAND_2GHZ;
3148 else if (REG_IS_5GHZ_FREQ(freq) || REG_IS_49GHZ_FREQ(freq))
3149 return DP_BAND_5GHZ;
3150 else if (REG_IS_6GHZ_FREQ(freq))
3151 return DP_BAND_6GHZ;
3152 return DP_BAND_INVALID;
3153 }
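
/*
 * Worked example (illustrative; the exact band edges come from the
 * REG_IS_*_FREQ macros in reg_services_common.h):
 *
 *   dp_freq_to_band(2437) -> DP_BAND_2GHZ  (2.4 GHz channel 6)
 *   dp_freq_to_band(5180) -> DP_BAND_5GHZ  (5 GHz channel 36)
 *   dp_freq_to_band(5955) -> DP_BAND_6GHZ  (6 GHz channel 1)
 *   dp_freq_to_band(0)    -> DP_BAND_INVALID
 */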
3154
3155 void dp_map_link_id_band(struct dp_peer *peer)
3156 {
3157 struct dp_txrx_peer *txrx_peer = NULL;
3158 enum dp_bands band;
3159
3160 txrx_peer = dp_get_txrx_peer(peer);
3161 if (txrx_peer) {
3162 band = dp_freq_to_band(peer->freq);
3163 txrx_peer->band[peer->link_id + 1] = band;
3164 dp_info("Band(Freq: %u): %u mapped to Link ID: %u",
3165 peer->freq, band, peer->link_id);
3166 } else {
3167 dp_info("txrx_peer NULL for peer: " QDF_MAC_ADDR_FMT,
3168 QDF_MAC_ADDR_REF(peer->mac_addr.raw));
3169 }
3170 }
3171
3172 QDF_STATUS
3173 dp_rx_peer_ext_evt(struct dp_soc *soc, struct dp_peer_ext_evt_info *info)
3174 {
3175 struct dp_peer *peer = NULL;
3176 struct cdp_peer_info peer_info = { 0 };
3177
3178 QDF_ASSERT(info->peer_id <= soc->max_peer_id);
3179
3180 DP_PEER_INFO_PARAMS_INIT(&peer_info, info->vdev_id, info->peer_mac_addr,
3181 false, CDP_LINK_PEER_TYPE);
3182 peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CONFIG);
3183
3184 if (!peer) {
3185 dp_err("peer NULL, id %u, MAC " QDF_MAC_ADDR_FMT ", vdev_id %u",
3186 info->peer_id, QDF_MAC_ADDR_REF(info->peer_mac_addr),
3187 info->vdev_id);
3188
3189 return QDF_STATUS_E_FAILURE;
3190 }
3191
3192 peer->link_id = info->link_id;
3193 peer->link_id_valid = info->link_id_valid;
3194
3195 if (peer->freq)
3196 dp_map_link_id_band(peer);
3197
3198 dp_peer_unref_delete(peer, DP_MOD_ID_CONFIG);
3199
3200 return QDF_STATUS_SUCCESS;
3201 }
3202 #endif
3203 #ifdef WLAN_FEATURE_11BE_MLO
3204 void dp_rx_mlo_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id)
3205 {
3206 uint16_t ml_peer_id = dp_gen_ml_peer_id(soc, peer_id);
3207 uint8_t mac_addr[QDF_MAC_ADDR_SIZE] = {0};
3208 uint8_t vdev_id = DP_VDEV_ALL;
3209 uint8_t is_wds = 0;
3210
3211 dp_cfg_event_record_peer_map_unmap_evt(soc, DP_CFG_EVENT_MLO_PEER_UNMAP,
3212 NULL, mac_addr, 0, peer_id,
3213 0, 0, vdev_id);
3214 dp_info("MLO peer_unmap_event (soc:%pK) peer_id %d",
3215 soc, peer_id);
3216 DP_STATS_INC(soc, t2h_msg_stats.ml_peer_unmap, 1);
3217
3218 dp_rx_peer_unmap_handler(soc, ml_peer_id, vdev_id,
3219 mac_addr, is_wds,
3220 DP_PEER_WDS_COUNT_INVALID);
3221 }
3222 #endif
3223
3224 #ifndef AST_OFFLOAD_ENABLE
3225 void
3226 dp_peer_find_detach(struct dp_soc *soc)
3227 {
3228 dp_soc_wds_detach(soc);
3229 dp_peer_find_map_detach(soc);
3230 dp_peer_find_hash_detach(soc);
3231 dp_peer_ast_hash_detach(soc);
3232 dp_peer_ast_table_detach(soc);
3233 dp_peer_mec_hash_detach(soc);
3234 }
3235 #else
3236 void
3237 dp_peer_find_detach(struct dp_soc *soc)
3238 {
3239 dp_peer_find_map_detach(soc);
3240 dp_peer_find_hash_detach(soc);
3241 }
3242 #endif
3243
3244 void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
3245 {
3246 dp_peer_rx_tid_setup(peer);
3247
3248 peer->active_ba_session_cnt = 0;
3249 peer->hw_buffer_size = 0;
3250 peer->kill_256_sessions = 0;
3251
3252 /*
3253 * Set security defaults: no PN check, no security. The target may
3254 * send a HTT SEC_IND message to overwrite these defaults.
3255 */
3256 if (peer->txrx_peer)
3257 peer->txrx_peer->security[dp_sec_ucast].sec_type =
3258 peer->txrx_peer->security[dp_sec_mcast].sec_type =
3259 cdp_sec_type_none;
3260 }
3261
3262 #ifdef WLAN_FEATURE_11BE_MLO
3263 static void dp_peer_rx_init_reorder_queue(struct dp_pdev *pdev,
3264 struct dp_peer *peer)
3265 {
3266 struct dp_soc *soc = pdev->soc;
3267 struct dp_peer *mld_peer = DP_GET_MLD_PEER_FROM_PEER(peer);
3268 struct dp_rx_tid *rx_tid = NULL;
3269 uint32_t ba_window_size, tid;
3270 QDF_STATUS status;
3271
3272 if (dp_get_peer_vdev_roaming_in_progress(peer))
3273 return;
3274
3275 tid = DP_NON_QOS_TID;
3276 rx_tid = &mld_peer->rx_tid[tid];
3277 ba_window_size = rx_tid->ba_status == DP_RX_BA_ACTIVE ?
3278 rx_tid->ba_win_size : 1;
3279 status = dp_peer_rx_reorder_queue_setup(soc, peer, BIT(tid), ba_window_size);
3280 /* Do not return on failure, continue for other tids. */
3281 dp_info("peer %pK " QDF_MAC_ADDR_FMT " type %d setup tid %d ba_win_size %d%s",
3282 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3283 peer->peer_type, tid, ba_window_size,
3284 QDF_IS_STATUS_SUCCESS(status) ? " SUCCESS" : " FAILED");
3285
3286 for (tid = 0; tid < DP_MAX_TIDS - 1; tid++) {
3287 rx_tid = &mld_peer->rx_tid[tid];
3288 ba_window_size = rx_tid->ba_status == DP_RX_BA_ACTIVE ?
3289 rx_tid->ba_win_size : 1;
3290 status = dp_peer_rx_reorder_queue_setup(soc, peer, BIT(tid),
3291 ba_window_size);
3292 /* Do not return on failure, continue for other tids. */
3293 dp_info("peer %pK " QDF_MAC_ADDR_FMT " type %d setup tid %d ba_win_size %d%s",
3294 peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3295 peer->peer_type, tid, ba_window_size,
3296 QDF_IS_STATUS_SUCCESS(status) ? " SUCCESS" : " FAILED");
3297 }
3298 }
3299
3300 void dp_peer_rx_init_wrapper(struct dp_pdev *pdev, struct dp_peer *peer,
3301 struct cdp_peer_setup_info *setup_info)
3302 {
3303 if (setup_info && !setup_info->is_first_link)
3304 dp_peer_rx_init_reorder_queue(pdev, peer);
3305 else
3306 dp_peer_rx_init(pdev, peer);
3307 }
3308 #else
3309 void dp_peer_rx_init_wrapper(struct dp_pdev *pdev, struct dp_peer *peer,
3310 struct cdp_peer_setup_info *setup_info)
3311 {
3312 dp_peer_rx_init(pdev, peer);
3313 }
3314 #endif
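
/*
 * Call-flow sketch (illustrative summary of the wrapper above): for
 * an MLO association only the first link runs the full Rx init;
 * later links reuse the MLD peer's rx_tid state and only program
 * their own reorder queues:
 *
 *   setup_info == NULL (non-MLO build/peer) -> dp_peer_rx_init()
 *   setup_info->is_first_link == true       -> dp_peer_rx_init()
 *   setup_info->is_first_link == false      -> dp_peer_rx_init_reorder_queue()
 */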
3315
3316 void dp_peer_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
3317 {
3318 enum wlan_op_mode vdev_opmode;
3319 uint8_t vdev_mac_addr[QDF_MAC_ADDR_SIZE];
3320 struct dp_pdev *pdev = vdev->pdev;
3321 struct dp_soc *soc = pdev->soc;
3322
3323 /* save vdev related member in case vdev freed */
3324 vdev_opmode = vdev->opmode;
3325
3326 if (!IS_MLO_DP_MLD_PEER(peer))
3327 dp_monitor_peer_tx_cleanup(vdev, peer);
3328
3329 if (vdev_opmode != wlan_op_mode_monitor)
3330 /* cleanup the Rx reorder queues for this peer */
3331 dp_peer_rx_cleanup(vdev, peer);
3332
3333 dp_peer_rx_tids_destroy(peer);
3334
3335 if (IS_MLO_DP_LINK_PEER(peer))
3336 dp_link_peer_del_mld_peer(peer);
3337 if (IS_MLO_DP_MLD_PEER(peer))
3338 dp_mld_peer_deinit_link_peers_info(peer);
3339
3340 qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
3341 QDF_MAC_ADDR_SIZE);
3342
3343 if (soc->cdp_soc.ol_ops->peer_unref_delete)
3344 soc->cdp_soc.ol_ops->peer_unref_delete(
3345 soc->ctrl_psoc,
3346 vdev->pdev->pdev_id,
3347 peer->mac_addr.raw, vdev_mac_addr,
3348 vdev_opmode);
3349 }
3350
3351 QDF_STATUS
3352 dp_set_key_sec_type_wifi3(struct cdp_soc_t *soc, uint8_t vdev_id,
3353 uint8_t *peer_mac, enum cdp_sec_type sec_type,
3354 bool is_unicast)
3355 {
3356 struct dp_peer *peer =
3357 dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
3358 peer_mac, 0, vdev_id,
3359 DP_MOD_ID_CDP);
3360 int sec_index;
3361
3362 if (!peer) {
3363 dp_peer_debug("%pK: Peer is NULL!\n", soc);
3364 return QDF_STATUS_E_FAILURE;
3365 }
3366
3367 if (!peer->txrx_peer) {
3368 dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3369 dp_peer_debug("%pK: txrx peer is NULL!\n", soc);
3370 return QDF_STATUS_E_FAILURE;
3371 }
3372
3373 dp_peer_info("%pK: key sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3374 soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3375 is_unicast ? "ucast" : "mcast", sec_type);
3376
3377 sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3378 peer->txrx_peer->security[sec_index].sec_type = sec_type;
3379
3380 dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
3381
3382 return QDF_STATUS_SUCCESS;
3383 }
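
/*
 * Usage sketch (illustrative only, not compiled; the vdev id and MAC
 * below are placeholders): the control path would mark a peer's
 * unicast key type after a key install, e.g.
 */
#if 0
static void example_set_ucast_ccmp(struct cdp_soc_t *cdp_soc)
{
	uint8_t sta_mac[QDF_MAC_ADDR_SIZE] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	if (dp_set_key_sec_type_wifi3(cdp_soc, 0 /* vdev_id */, sta_mac,
				      cdp_sec_type_aes_ccmp, true) !=
	    QDF_STATUS_SUCCESS)
		dp_peer_err("Failed to set ucast sec type");
}
#endif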
3384
3385 void
3386 dp_rx_sec_ind_handler(struct dp_soc *soc, uint16_t peer_id,
3387 enum cdp_sec_type sec_type, int is_unicast,
3388 u_int32_t *michael_key,
3389 u_int32_t *rx_pn)
3390 {
3391 struct dp_peer *peer;
3392 struct dp_txrx_peer *txrx_peer;
3393 int sec_index;
3394
3395 peer = dp_peer_get_ref_by_id(soc, peer_id, DP_MOD_ID_HTT);
3396 if (!peer) {
3397 dp_peer_err("Couldn't find peer from ID %d - skipping security inits",
3398 peer_id);
3399 return;
3400 }
3401 txrx_peer = dp_get_txrx_peer(peer);
3402 if (!txrx_peer) {
3403 dp_peer_err("Couldn't find txrx peer from ID %d - skipping security inits",
3404 peer_id);
dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3405 return;
3406 }
3407
3408 dp_peer_info("%pK: sec spec for peer %pK " QDF_MAC_ADDR_FMT ": %s key of type %d",
3409 soc, peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
3410 is_unicast ? "ucast" : "mcast", sec_type);
3411 sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
3412
3413 peer->txrx_peer->security[sec_index].sec_type = sec_type;
3414 #ifdef notyet /* TODO: See if this is required for defrag support */
3415 /* michael key only valid for TKIP, but for simplicity,
3416 * copy it anyway
3417 */
3418 qdf_mem_copy(
3419 &peer->txrx_peer->security[sec_index].michael_key[0],
3420 michael_key,
3421 sizeof(peer->txrx_peer->security[sec_index].michael_key));
3422 #ifdef BIG_ENDIAN_HOST
3423 OL_IF_SWAPBO(peer->txrx_peer->security[sec_index].michael_key[0],
3424 sizeof(peer->txrx_peer->security[sec_index].michael_key));
3425 #endif /* BIG_ENDIAN_HOST */
3426 #endif
3427
3428 #ifdef notyet /* TODO: Check if this is required for wifi3.0 */
3429 if (sec_type != cdp_sec_type_wapi) {
3430 qdf_mem_zero(peer->tids_last_pn_valid, _EXT_TIDS);
3431 } else {
3432 for (i = 0; i < DP_MAX_TIDS; i++) {
3433 /*
3434 * Setting PN valid bit for WAPI sec_type,
3435 * since WAPI PN has to be started with predefined value
3436 */
3437 peer->tids_last_pn_valid[i] = 1;
3438 qdf_mem_copy(
3439 (u_int8_t *) &peer->tids_last_pn[i],
3440 (u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
3441 peer->tids_last_pn[i].pn128[1] =
3442 qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
3443 peer->tids_last_pn[i].pn128[0] =
3444 qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
3445 }
3446 }
3447 #endif
3448 /* TODO: Update HW TID queue with PN check parameters (pn type for
3449 * all security types and last pn for WAPI) once REO command API
3450 * is available
3451 */
3452
3453 dp_peer_unref_delete(peer, DP_MOD_ID_HTT);
3454 }
3455
3456 #ifdef QCA_PEER_EXT_STATS
3457 QDF_STATUS dp_peer_delay_stats_ctx_alloc(struct dp_soc *soc,
3458 struct dp_txrx_peer *txrx_peer)
3459 {
3460 uint8_t tid, ctx_id;
3461
3462 if (!soc || !txrx_peer) {
3463 dp_warn("Null soc%pK or peer%pK", soc, txrx_peer);
3464 return QDF_STATUS_E_INVAL;
3465 }
3466
3467 if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
3468 return QDF_STATUS_SUCCESS;
3469
3470 /*
3471 * Allocate memory for peer extended stats.
3472 */
3473 txrx_peer->delay_stats =
3474 qdf_mem_malloc(sizeof(struct dp_peer_delay_stats));
3475 if (!txrx_peer->delay_stats) {
3476 dp_err("Peer extended stats obj alloc failed!!");
3477 return QDF_STATUS_E_NOMEM;
3478 }
3479
3480 for (tid = 0; tid < CDP_MAX_DATA_TIDS; tid++) {
3481 for (ctx_id = 0; ctx_id < CDP_MAX_TXRX_CTX; ctx_id++) {
3482 struct cdp_delay_tx_stats *tx_delay =
3483 &txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].tx_delay;
3484 struct cdp_delay_rx_stats *rx_delay =
3485 &txrx_peer->delay_stats->delay_tid_stats[tid][ctx_id].rx_delay;
3486
3487 dp_hist_init(&tx_delay->tx_swq_delay,
3488 CDP_HIST_TYPE_SW_ENQEUE_DELAY);
3489 dp_hist_init(&tx_delay->hwtx_delay,
3490 CDP_HIST_TYPE_HW_COMP_DELAY);
3491 dp_hist_init(&rx_delay->to_stack_delay,
3492 CDP_HIST_TYPE_REAP_STACK);
3493 }
3494 }
3495
3496 return QDF_STATUS_SUCCESS;
3497 }
3498
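/**
 * dp_peer_delay_stats_ctx_dealloc() - free the peer delay stats context
 * @soc: DP soc handle
 * @txrx_peer: txrx peer whose context is freed
 *
 * Return: None
 */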
void dp_peer_delay_stats_ctx_dealloc(struct dp_soc *soc,
				     struct dp_txrx_peer *txrx_peer)
{
	if (!txrx_peer) {
		dp_warn("peer_ext dealloc failed due to NULL peer object");
		return;
	}

	if (!wlan_cfg_is_peer_ext_stats_enabled(soc->wlan_cfg_ctx))
		return;

	if (!txrx_peer->delay_stats)
		return;

	qdf_mem_free(txrx_peer->delay_stats);
	txrx_peer->delay_stats = NULL;
}

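/**
 * dp_peer_delay_stats_ctx_clr() - zero out the peer delay stats context
 * @txrx_peer: txrx peer whose delay stats are cleared
 *
 * Return: None
 */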
void dp_peer_delay_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
	if (txrx_peer->delay_stats)
		qdf_mem_zero(txrx_peer->delay_stats,
			     sizeof(struct dp_peer_delay_stats));
}
#endif

#ifdef WLAN_PEER_JITTER
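/**
 * dp_peer_jitter_stats_ctx_alloc() - allocate jitter stats context for a peer
 * @pdev: DP pdev handle
 * @txrx_peer: txrx peer the context is allocated for
 *
 * The context is sized per TID when the pdev runs with NSS offload enabled,
 * and per TID per ring otherwise. Nothing is allocated when peer jitter
 * stats are disabled in the wlan config.
 *
 * Return: QDF_STATUS_SUCCESS on success, error code otherwise
 */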
QDF_STATUS dp_peer_jitter_stats_ctx_alloc(struct dp_pdev *pdev,
					  struct dp_txrx_peer *txrx_peer)
{
	if (!pdev || !txrx_peer) {
		dp_warn("Null pdev or peer");
		return QDF_STATUS_E_INVAL;
	}

	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
		/*
		 * Allocate memory on per tid basis when nss is enabled
		 */
		txrx_peer->jitter_stats =
			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
					* DP_MAX_TIDS);
	} else {
		/*
		 * Allocate memory on per tid per ring basis
		 */
		txrx_peer->jitter_stats =
			qdf_mem_malloc(sizeof(struct cdp_peer_tid_stats)
					* DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
	}

	if (!txrx_peer->jitter_stats) {
		dp_warn("Jitter stats obj alloc failed!!");
		return QDF_STATUS_E_NOMEM;
	}

	return QDF_STATUS_SUCCESS;
}

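/**
 * dp_peer_jitter_stats_ctx_dealloc() - free the peer jitter stats context
 * @pdev: DP pdev handle
 * @txrx_peer: txrx peer whose context is freed
 *
 * Return: None
 */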
void dp_peer_jitter_stats_ctx_dealloc(struct dp_pdev *pdev,
				      struct dp_txrx_peer *txrx_peer)
{
	if (!pdev || !txrx_peer) {
		dp_warn("Null pdev or peer");
		return;
	}

	if (!wlan_cfg_is_peer_jitter_stats_enabled(pdev->soc->wlan_cfg_ctx))
		return;

	if (txrx_peer->jitter_stats) {
		qdf_mem_free(txrx_peer->jitter_stats);
		txrx_peer->jitter_stats = NULL;
	}
}

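/**
 * dp_peer_jitter_stats_ctx_clr() - zero out the peer jitter stats context
 * @txrx_peer: txrx peer whose jitter stats are cleared
 *
 * Return: None
 */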
void dp_peer_jitter_stats_ctx_clr(struct dp_txrx_peer *txrx_peer)
{
	struct cdp_peer_tid_stats *jitter_stats = NULL;

	if (!txrx_peer) {
		dp_warn("Null peer");
		return;
	}

	if (!wlan_cfg_is_peer_jitter_stats_enabled(
			txrx_peer->vdev->pdev->soc->wlan_cfg_ctx))
		return;

	jitter_stats = txrx_peer->jitter_stats;
	if (!jitter_stats)
		return;

	if (wlan_cfg_get_dp_pdev_nss_enabled(
			txrx_peer->vdev->pdev->wlan_cfg_ctx))
		qdf_mem_zero(jitter_stats,
			     sizeof(struct cdp_peer_tid_stats) * DP_MAX_TIDS);
	else
		qdf_mem_zero(jitter_stats,
			     sizeof(struct cdp_peer_tid_stats) *
			     DP_MAX_TIDS * CDP_MAX_TXRX_CTX);
}
#endif

#ifdef DP_PEER_EXTENDED_API
/**
 * dp_peer_set_bw() - Set bandwidth and mpdu retry count threshold for peer
 * @soc: DP soc handle
 * @txrx_peer: Core txrx_peer handle
 * @set_bw: enum of bandwidth to be set for this peer connection
 *
 * Return: None
 */
static void dp_peer_set_bw(struct dp_soc *soc, struct dp_txrx_peer *txrx_peer,
			   enum cdp_peer_bw set_bw)
{
	if (!txrx_peer)
		return;

	txrx_peer->bw = set_bw;

	switch (set_bw) {
	case CDP_160_MHZ:
	case CDP_320_MHZ:
		txrx_peer->mpdu_retry_threshold =
				soc->wlan_cfg_ctx->mpdu_retry_threshold_2;
		break;
	case CDP_20_MHZ:
	case CDP_40_MHZ:
	case CDP_80_MHZ:
	default:
		txrx_peer->mpdu_retry_threshold =
				soc->wlan_cfg_ctx->mpdu_retry_threshold_1;
		break;
	}

	dp_info("Peer id: %u: BW: %u, mpdu retry threshold: %u",
		txrx_peer->peer_id, txrx_peer->bw,
		txrx_peer->mpdu_retry_threshold);
}

#ifdef WLAN_FEATURE_11BE_MLO
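/**
 * dp_register_peer() - register a peer with the txrx layer on station connect
 * @soc_hdl: CDP soc handle
 * @pdev_id: id of the pdev the peer belongs to
 * @sta_desc: station descriptor carrying the peer MAC address and bandwidth
 *
 * Moves the peer to OL_TXRX_PEER_STATE_CONN, programs the bandwidth based
 * mpdu retry threshold and flushes cached rx frames. For an MLO link peer,
 * the state change and rx flush are mirrored on the MLD peer as well.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT if the peer
 * cannot be found
 */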
QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct ol_txrx_desc_type *sta_desc)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_CONN;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);

	dp_rx_flush_rx_cached(peer, false);

	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
		dp_peer_info("register for mld peer " QDF_MAC_ADDR_FMT,
			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw));
		qdf_spin_lock_bh(&peer->mld_peer->peer_info_lock);
		peer->mld_peer->state = peer->state;
		qdf_spin_unlock_bh(&peer->mld_peer->peer_info_lock);
		dp_rx_flush_rx_cached(peer->mld_peer, false);
	}

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

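/**
 * dp_peer_state_update() - update the state of a connected peer
 * @soc_hdl: CDP soc handle
 * @peer_mac: MAC address of the peer
 * @state: new ol_txrx_peer_state to move the peer to
 *
 * Also derives the authorize flag from the state and, for an MLO link peer,
 * propagates state and authorization to the MLD peer.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the peer
 * cannot be found
 */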
QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
				enum ol_txrx_peer_state state)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
				      DP_MOD_ID_CDP);
	if (!peer) {
		dp_peer_err("%pK: Failed to find peer[" QDF_MAC_ADDR_FMT "]",
			    soc, QDF_MAC_ADDR_REF(peer_mac));
		return QDF_STATUS_E_FAILURE;
	}
	peer->state = state;
	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;

	if (peer->txrx_peer)
		peer->txrx_peer->authorize = peer->authorize;

	dp_peer_info("peer %pK MAC " QDF_MAC_ADDR_FMT " state %d",
		     peer, QDF_MAC_ADDR_REF(peer->mac_addr.raw),
		     peer->state);

	if (IS_MLO_DP_LINK_PEER(peer) && peer->first_link) {
		peer->mld_peer->state = peer->state;
		peer->mld_peer->txrx_peer->authorize = peer->authorize;
		dp_peer_info("mld peer %pK MAC " QDF_MAC_ADDR_FMT " state %d",
			     peer->mld_peer,
			     QDF_MAC_ADDR_REF(peer->mld_peer->mac_addr.raw),
			     peer->mld_peer->state);
	}

	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
#else
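/**
 * dp_register_peer() - register a peer with the txrx layer on station connect
 * @soc_hdl: CDP soc handle
 * @pdev_id: id of the pdev the peer belongs to
 * @sta_desc: station descriptor carrying the peer MAC address and bandwidth
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT if the peer
 * cannot be found
 */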
QDF_STATUS dp_register_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			    struct ol_txrx_desc_type *sta_desc)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	peer = dp_peer_find_hash_find(soc, sta_desc->peer_addr.bytes,
				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAULT;

	qdf_spin_lock_bh(&peer->peer_info_lock);
	peer->state = OL_TXRX_PEER_STATE_CONN;
	qdf_spin_unlock_bh(&peer->peer_info_lock);

	dp_peer_set_bw(soc, peer->txrx_peer, sta_desc->bw);

	dp_rx_flush_rx_cached(peer, false);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

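/**
 * dp_peer_state_update() - update the state of a connected peer
 * @soc_hdl: CDP soc handle
 * @peer_mac: MAC address of the peer
 * @state: new ol_txrx_peer_state to move the peer to
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the peer
 * cannot be found
 */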
QDF_STATUS dp_peer_state_update(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
				enum ol_txrx_peer_state state)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
				      DP_MOD_ID_CDP);
	if (!peer) {
		dp_peer_err("%pK: Failed to find peer for: [" QDF_MAC_ADDR_FMT "]",
			    soc, QDF_MAC_ADDR_REF(peer_mac));
		return QDF_STATUS_E_FAILURE;
	}
	peer->state = state;
	peer->authorize = (state == OL_TXRX_PEER_STATE_AUTH) ? 1 : 0;

	if (peer->txrx_peer)
		peer->txrx_peer->authorize = peer->authorize;

	dp_info("peer %pK state %d", peer, peer->state);
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}
#endif

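/**
 * dp_clear_peer() - reset the rx state of a peer on disconnect
 * @soc_hdl: CDP soc handle
 * @pdev_id: id of the pdev the peer belongs to
 * @peer_addr: MAC address of the peer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT if the peer is
 * not found or no longer valid
 */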
QDF_STATUS
dp_clear_peer(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	      struct qdf_mac_addr peer_addr)
{
	struct dp_peer *peer;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	peer = dp_peer_find_hash_find(soc, peer_addr.bytes,
				      0, DP_VDEV_ALL, DP_MOD_ID_CDP);
	if (!peer)
		return QDF_STATUS_E_FAULT;
	if (!peer->valid) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAULT;
	}

	dp_clear_peer_internal(soc, peer);
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return QDF_STATUS_SUCCESS;
}

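/**
 * dp_get_vdevid() - get the vdev id of a peer by its MAC address
 * @soc_hdl: CDP soc handle
 * @peer_mac: MAC address of the peer
 * @vdev_id: output buffer for the vdev id
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the peer
 * cannot be found
 */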
QDF_STATUS dp_get_vdevid(struct cdp_soc_t *soc_hdl, uint8_t *peer_mac,
			 uint8_t *vdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer =
		dp_peer_find_hash_find(soc, peer_mac, 0, DP_VDEV_ALL,
				       DP_MOD_ID_CDP);

	if (!peer)
		return QDF_STATUS_E_FAILURE;

	dp_info("peer %pK vdev %pK vdev id %d",
		peer, peer->vdev, peer->vdev->vdev_id);
	*vdev_id = peer->vdev->vdev_id;
	/* ref_cnt is incremented inside dp_peer_find_hash_find().
	 * Decrement it here.
	 */
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

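/**
 * dp_get_vdev_by_peer_addr() - get the vdev a peer is attached to
 * @pdev_handle: CDP pdev handle
 * @peer_addr: MAC address of the peer
 *
 * Return: CDP vdev handle on success, NULL if the pdev or peer is not found
 */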
struct cdp_vdev *
dp_get_vdev_by_peer_addr(struct cdp_pdev *pdev_handle,
			 struct qdf_mac_addr peer_addr)
{
	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
	struct dp_peer *peer = NULL;
	struct cdp_vdev *vdev = NULL;

	if (!pdev) {
		dp_peer_info("PDEV not found for peer_addr: " QDF_MAC_ADDR_FMT,
			     QDF_MAC_ADDR_REF(peer_addr.bytes));
		return NULL;
	}

	peer = dp_peer_find_hash_find(pdev->soc, peer_addr.bytes, 0,
				      DP_VDEV_ALL, DP_MOD_ID_CDP);
	if (!peer) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
			  "Peer not found for peer_addr: " QDF_MAC_ADDR_FMT,
			  QDF_MAC_ADDR_REF(peer_addr.bytes));
		return NULL;
	}

	vdev = (struct cdp_vdev *)peer->vdev;

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
	return vdev;
}

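/**
 * dp_get_vdev_for_peer() - get the vdev of a peer handle
 * @peer_handle: opaque dp peer handle
 *
 * Return: CDP vdev handle of the peer
 */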
struct cdp_vdev *dp_get_vdev_for_peer(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;

	DP_TRACE(DEBUG, "peer %pK vdev %pK", peer, peer->vdev);
	return (struct cdp_vdev *)peer->vdev;
}

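/**
 * dp_peer_get_peer_mac_addr() - get the raw MAC address of a peer
 * @peer_handle: opaque dp peer handle
 *
 * Return: pointer to the peer MAC address
 */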
uint8_t *dp_peer_get_peer_mac_addr(void *peer_handle)
{
	struct dp_peer *peer = peer_handle;
	uint8_t *mac;

	mac = peer->mac_addr.raw;
	dp_info("peer %pK mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
		peer, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return peer->mac_addr.raw;
}

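/**
 * dp_get_peer_state() - get the current state of a peer
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer
 * @slowpath: true when called from a slowpath context where logging is cheap
 *
 * The state is read from the target peer, i.e. the MLD peer for an MLO
 * link peer.
 *
 * Return: ol_txrx_peer_state of the peer, OL_TXRX_PEER_STATE_INVALID if the
 * peer cannot be found
 */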
int dp_get_peer_state(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
		      uint8_t *peer_mac, bool slowpath)
{
	enum ol_txrx_peer_state peer_state;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct cdp_peer_info peer_info = { 0 };
	struct dp_peer *peer;
	struct dp_peer *tgt_peer;

	DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac,
				 false, CDP_WILD_PEER_TYPE);

	peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_CDP);
	if (!peer)
		return OL_TXRX_PEER_STATE_INVALID;

	tgt_peer = dp_get_tgt_peer_from_peer(peer);
	peer_state = tgt_peer->state;

	if (slowpath)
		dp_peer_info("peer %pK tgt_peer: %pK peer MAC "
			     QDF_MAC_ADDR_FMT " tgt peer MAC "
			     QDF_MAC_ADDR_FMT " tgt peer state %d",
			     peer, tgt_peer,
			     QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			     QDF_MAC_ADDR_REF(tgt_peer->mac_addr.raw),
			     tgt_peer->state);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return peer_state;
}

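/**
 * dp_local_peer_id_pool_init() - initialize the local peer id freelist
 * @pdev: DP pdev handle
 *
 * The pool array doubles as a singly linked freelist: pool[i] holds the
 * index of the next free ID, and an entry that points at itself marks the
 * end of the list. For example, with 3 IDs the layout after init is
 * freelist = 0, pool = {1, 2, 3, 3}; allocation walks 0 -> 1 -> 2 until
 * only the self-linked end marker remains.
 *
 * Return: None
 */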
void dp_local_peer_id_pool_init(struct dp_pdev *pdev)
{
	int i;

	/* point the freelist to the first ID */
	pdev->local_peer_ids.freelist = 0;

	/* link each ID to the next one */
	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
		pdev->local_peer_ids.pool[i] = i + 1;
		pdev->local_peer_ids.map[i] = NULL;
	}

	/* link the last ID to itself, to mark the end of the list */
	i = OL_TXRX_NUM_LOCAL_PEER_IDS;
	pdev->local_peer_ids.pool[i] = i;

	qdf_spinlock_create(&pdev->local_peer_ids.lock);
	dp_info("Peer pool init");
}

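/**
 * dp_local_peer_id_alloc() - assign a local id to a peer from the freelist
 * @pdev: DP pdev handle
 * @peer: DP peer to assign the id to
 *
 * Takes the ID at the head of the freelist and records the peer in the map,
 * or assigns OL_TXRX_INVALID_LOCAL_PEER_ID when the pool is exhausted.
 *
 * Return: None
 */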
void dp_local_peer_id_alloc(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i;

	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	i = pdev->local_peer_ids.freelist;
	if (pdev->local_peer_ids.pool[i] == i) {
		/* the list is empty, except for the list-end marker */
		peer->local_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
	} else {
		/* take the head ID and advance the freelist */
		peer->local_id = i;
		pdev->local_peer_ids.freelist = pdev->local_peer_ids.pool[i];
		pdev->local_peer_ids.map[i] = peer;
	}
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
	dp_info("peer %pK, local id %d", peer, peer->local_id);
}

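/**
 * dp_local_peer_id_free() - return a peer's local id to the freelist
 * @pdev: DP pdev handle
 * @peer: DP peer whose local id is released
 *
 * Return: None
 */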
void dp_local_peer_id_free(struct dp_pdev *pdev, struct dp_peer *peer)
{
	int i = peer->local_id;

	if ((i == OL_TXRX_INVALID_LOCAL_PEER_ID) ||
	    (i >= OL_TXRX_NUM_LOCAL_PEER_IDS)) {
		return;
	}

	/* put this ID on the head of the freelist */
	qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
	pdev->local_peer_ids.pool[i] = pdev->local_peer_ids.freelist;
	pdev->local_peer_ids.freelist = i;
	pdev->local_peer_ids.map[i] = NULL;
	qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
}

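/**
 * dp_find_peer_exist_on_vdev() - check whether a peer exists on a given vdev
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev to search
 * @peer_addr: MAC address of the peer
 *
 * Return: true if the peer exists on the vdev, false otherwise
 */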
bool dp_find_peer_exist_on_vdev(struct cdp_soc_t *soc_hdl,
				uint8_t vdev_id, uint8_t *peer_addr)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = NULL;

	peer = dp_peer_find_hash_find(soc, peer_addr, 0, vdev_id,
				      DP_MOD_ID_CDP);
	if (!peer)
		return false;

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return true;
}

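/**
 * dp_find_peer_exist_on_other_vdev() - check for the peer on any other vdev
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev to exclude from the search
 * @peer_addr: MAC address of the peer
 * @max_bssid: number of vdev ids to scan
 *
 * Return: true if a duplicate peer exists on another vdev, false otherwise
 */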
bool dp_find_peer_exist_on_other_vdev(struct cdp_soc_t *soc_hdl,
				      uint8_t vdev_id, uint8_t *peer_addr,
				      uint16_t max_bssid)
{
	int i;
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = NULL;

	for (i = 0; i < max_bssid; i++) {
		/* Need to check vdevs other than the vdev_id */
		if (vdev_id == i)
			continue;
		peer = dp_peer_find_hash_find(soc, peer_addr, 0, i,
					      DP_MOD_ID_CDP);
		if (peer) {
			dp_err("Duplicate peer " QDF_MAC_ADDR_FMT " already exists on vdev %d",
			       QDF_MAC_ADDR_REF(peer_addr), i);
			dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
			return true;
		}
	}

	return false;
}

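/**
 * dp_set_peer_as_tdls_peer() - mark or unmark a peer as a TDLS peer
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer
 * @val: true to flag the peer as TDLS, false to clear the flag
 *
 * Return: None
 */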
void dp_set_peer_as_tdls_peer(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			      uint8_t *peer_mac, bool val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = NULL;

	peer = dp_peer_find_hash_find(soc, peer_mac, 0, vdev_id,
				      DP_MOD_ID_CDP);
	if (!peer) {
		dp_err("Failed to find peer for:" QDF_MAC_ADDR_FMT,
		       QDF_MAC_ADDR_REF(peer_mac));
		return;
	}

	dp_info("Set tdls flag %d for peer:" QDF_MAC_ADDR_FMT,
		val, QDF_MAC_ADDR_REF(peer_mac));
	peer->is_tdls_peer = val;

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}
#endif

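/**
 * dp_find_peer_exist() - check whether a peer exists by MAC address
 * @soc_hdl: CDP soc handle
 * @pdev_id: id of the pdev (unused; the lookup spans all vdevs)
 * @peer_addr: MAC address of the peer
 *
 * Return: true if the peer exists, false otherwise
 */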
bool dp_find_peer_exist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			uint8_t *peer_addr)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = NULL;

	peer = dp_peer_find_hash_find(soc, peer_addr, 0, DP_VDEV_ALL,
				      DP_MOD_ID_CDP);
	if (peer) {
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return true;
	}

	return false;
}

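/**
 * dp_set_michael_key() - install the TKIP Michael key for a peer
 * @soc: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer
 * @is_unicast: true for the unicast key index, false for multicast
 * @key: Michael key to copy into the peer security context
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if the peer
 * cannot be found
 */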
QDF_STATUS
dp_set_michael_key(struct cdp_soc_t *soc,
		   uint8_t vdev_id,
		   uint8_t *peer_mac,
		   bool is_unicast, uint32_t *key)
{
	uint8_t sec_index = is_unicast ? 1 : 0;
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find((struct dp_soc *)soc,
						       peer_mac, 0, vdev_id,
						       DP_MOD_ID_CDP);

	if (!peer) {
		dp_peer_err("%pK: peer not found", soc);
		return QDF_STATUS_E_FAILURE;
	}

	/* txrx_peer may be NULL while the peer is being torn down;
	 * bail out instead of dereferencing it blindly.
	 */
	if (!peer->txrx_peer) {
		dp_peer_err("%pK: txrx peer not found", soc);
		dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
		return QDF_STATUS_E_FAILURE;
	}

	qdf_mem_copy(&peer->txrx_peer->security[sec_index].michael_key[0],
		     key, IEEE80211_WEP_MICLEN);

	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);

	return QDF_STATUS_SUCCESS;
}

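/**
 * dp_vdev_bss_peer_ref_n_get() - find the bss peer of a vdev and take a ref
 * @soc: DP soc handle
 * @vdev: DP vdev whose peer list is searched
 * @mod_id: module id of the caller taking the reference
 *
 * The caller must release the reference with dp_peer_unref_delete().
 *
 * Return: referenced bss peer, or NULL if none is found or the reference
 * cannot be taken
 */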
struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
					   struct dp_vdev *vdev,
					   enum dp_mod_id mod_id)
{
	struct dp_peer *peer = NULL;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->bss_peer)
			break;
	}

	if (!peer) {
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		return NULL;
	}

	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		return peer;
	}

	/* do not hand out a peer for which no reference could be taken */
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
	return NULL;
}

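/**
 * dp_sta_vdev_self_peer_ref_n_get() - find the self peer of a STA vdev
 * @soc: DP soc handle
 * @vdev: DP vdev whose peer list is searched
 * @mod_id: module id of the caller taking the reference
 *
 * The caller must release the reference with dp_peer_unref_delete().
 *
 * Return: referenced self peer, or NULL if the vdev is not in STA mode, no
 * self peer is found, or the reference cannot be taken
 */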
struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
						struct dp_vdev *vdev,
						enum dp_mod_id mod_id)
{
	struct dp_peer *peer;

	if (vdev->opmode != wlan_op_mode_sta)
		return NULL;

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (peer->sta_self_peer)
			break;
	}

	if (!peer) {
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		return NULL;
	}

	if (dp_peer_get_ref(soc, peer, mod_id) == QDF_STATUS_SUCCESS) {
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
		return peer;
	}

	/* do not hand out a peer for which no reference could be taken */
	qdf_spin_unlock_bh(&vdev->peer_list_lock);
	return NULL;
}

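/**
 * dp_peer_flush_frags() - flush pending rx fragments for a peer
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer
 *
 * Removes the peer from the defrag waitlist and flushes the reorder
 * fragment queue for every TID.
 *
 * Return: None
 */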
void dp_peer_flush_frags(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			 uint8_t *peer_mac)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_peer *peer = dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
							      vdev_id,
							      DP_MOD_ID_CDP);
	struct dp_txrx_peer *txrx_peer;
	uint8_t tid;
	struct dp_rx_tid_defrag *defrag_rx_tid;

	if (!peer)
		return;

	if (!peer->txrx_peer)
		goto fail;

	dp_info("Flushing fragments for peer " QDF_MAC_ADDR_FMT,
		QDF_MAC_ADDR_REF(peer->mac_addr.raw));

	txrx_peer = peer->txrx_peer;

	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
		defrag_rx_tid = &txrx_peer->rx_tid[tid];

		qdf_spin_lock_bh(&defrag_rx_tid->defrag_tid_lock);
		dp_rx_defrag_waitlist_remove(txrx_peer, tid);
		dp_rx_reorder_flush_frag(txrx_peer, tid);
		qdf_spin_unlock_bh(&defrag_rx_tid->defrag_tid_lock);
	}
fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_CDP);
}

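/**
 * dp_peer_find_by_id_valid() - check whether a peer id maps to a valid peer
 * @soc: DP soc handle
 * @peer_id: peer id to look up
 *
 * Return: true if a valid peer is registered for @peer_id, false otherwise
 */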
bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id)
{
	struct dp_peer *peer = dp_peer_get_ref_by_id(soc, peer_id,
						     DP_MOD_ID_HTT);

	if (peer) {
		/*
		 * Decrement the peer ref which is taken as part of
		 * dp_peer_get_ref_by_id if PEER_LOCK_REF_PROTECT is enabled
		 */
		dp_peer_unref_delete(peer, DP_MOD_ID_HTT);

		return true;
	}

	return false;
}

qdf_export_symbol(dp_peer_find_by_id_valid);

#ifdef QCA_MULTIPASS_SUPPORT
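/**
 * dp_peer_multipass_list_remove() - remove a peer from the vdev multipass list
 * @peer: DP peer to remove
 *
 * Drops the multipass reference taken when the peer was added to the list.
 *
 * Return: None
 */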
void dp_peer_multipass_list_remove(struct dp_peer *peer)
{
	struct dp_vdev *vdev = peer->vdev;
	struct dp_txrx_peer *tpeer = NULL;
	bool found = false;

	qdf_spin_lock_bh(&vdev->mpass_peer_mutex);
	TAILQ_FOREACH(tpeer, &vdev->mpass_peer_list, mpass_peer_list_elem) {
		if (tpeer == peer->txrx_peer) {
			found = true;
			TAILQ_REMOVE(&vdev->mpass_peer_list, peer->txrx_peer,
				     mpass_peer_list_elem);
			break;
		}
	}

	qdf_spin_unlock_bh(&vdev->mpass_peer_mutex);

	if (found)
		dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
}

/**
 * dp_peer_multipass_list_add() - add to new multipass list
 * @soc: soc handle
 * @peer_mac: mac address
 * @vdev_id: vdev id for peer
 * @vlan_id: vlan_id
 *
 * Return: void
 */
static void dp_peer_multipass_list_add(struct dp_soc *soc, uint8_t *peer_mac,
				       uint8_t vdev_id, uint16_t vlan_id)
{
	struct dp_peer *peer =
			dp_peer_get_tgt_peer_hash_find(soc, peer_mac, 0,
						       vdev_id,
						       DP_MOD_ID_TX_MULTIPASS);

	if (qdf_unlikely(!peer)) {
		qdf_err("NULL peer");
		return;
	}

	if (qdf_unlikely(!peer->txrx_peer))
		goto fail;

	/* If peer already exists in vdev multipass list, do not add it.
	 * This may happen if key install comes twice or re-key
	 * happens for a peer.
	 */
	if (peer->txrx_peer->vlan_id) {
		dp_debug("peer already added to vdev multipass list "
			 "MAC: " QDF_MAC_ADDR_FMT " vlan: %d",
			 QDF_MAC_ADDR_REF(peer->mac_addr.raw),
			 peer->txrx_peer->vlan_id);
		goto fail;
	}

	/*
	 * Ref_cnt is incremented inside dp_peer_get_tgt_peer_hash_find().
	 * Decrement it when element is deleted from the list.
	 */
	peer->txrx_peer->vlan_id = vlan_id;
	qdf_spin_lock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
	TAILQ_INSERT_HEAD(&peer->txrx_peer->vdev->mpass_peer_list,
			  peer->txrx_peer,
			  mpass_peer_list_elem);
	qdf_spin_unlock_bh(&peer->txrx_peer->vdev->mpass_peer_mutex);
	return;

fail:
	dp_peer_unref_delete(peer, DP_MOD_ID_TX_MULTIPASS);
}

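/**
 * dp_peer_set_vlan_id() - add a peer to the multipass list with a vlan id
 * @cdp_soc: CDP soc handle
 * @vdev_id: id of the vdev the peer belongs to
 * @peer_mac: MAC address of the peer
 * @vlan_id: vlan id to associate with the peer
 *
 * Only takes effect when multipass is enabled on the vdev.
 *
 * Return: None
 */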
void dp_peer_set_vlan_id(struct cdp_soc_t *cdp_soc,
			 uint8_t vdev_id, uint8_t *peer_mac,
			 uint16_t vlan_id)
{
	struct dp_soc *soc = (struct dp_soc *)cdp_soc;
	struct dp_vdev *vdev =
		dp_vdev_get_ref_by_id((struct dp_soc *)soc, vdev_id,
				      DP_MOD_ID_TX_MULTIPASS);

	dp_info("vdev_id %d, vdev %pK, multipass_en %d, peer_mac " QDF_MAC_ADDR_FMT " vlan %d",
		vdev_id, vdev, vdev ? vdev->multipass_en : 0,
		QDF_MAC_ADDR_REF(peer_mac), vlan_id);

	if (vdev && vdev->multipass_en) {
		dp_peer_multipass_list_add(soc, peer_mac, vdev_id, vlan_id);
		dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_MULTIPASS);
	}
}
#endif /* QCA_MULTIPASS_SUPPORT */
