/*
 * Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*=== includes ===*/
/* header files for OS primitives */
#include <osdep.h>              /* uint32_t, etc. */
#include <qdf_mem.h>            /* qdf_mem_malloc, etc. */
#include <qdf_types.h>          /* qdf_device_t, qdf_print */
/* header files for utilities */
#include "queue.h"              /* TAILQ */

/* header files for configuration API */
#include <ol_cfg.h>             /* ol_cfg_max_peer_id */

/* header files for our internal definitions */
#include <ol_txrx_api.h>        /* ol_txrx_pdev_t, etc. */
#include <ol_txrx_dbg.h>        /* TXRX_DEBUG_LEVEL */
#include <ol_txrx_internal.h>   /* ol_txrx_pdev_t, etc. */
#include <ol_txrx.h>            /* ol_txrx_peer_release_ref */
#include <ol_txrx_peer_find.h>  /* ol_txrx_peer_find_attach, etc. */
#include <ol_tx_queue.h>
#include "wlan_roam_debug.h"

/*=== misc. / utility function definitions ==================================*/

static int ol_txrx_log2_ceil(unsigned int value)
{
	/* need to switch to unsigned math so that negative values
	 * will right-shift towards 0 instead of -1
	 */
	unsigned int tmp = value;
	int log2 = -1;

	if (value == 0) {
		TXRX_ASSERT2(0);
		return 0;
	}

	while (tmp) {
		log2++;
		tmp >>= 1;
	}
	if (1U << log2 != value)
		log2++;

	return log2;
}

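/**
 * ol_txrx_peer_get_ref() - take a reference on a peer object
 * @peer: peer object to take the reference on
 * @dbg_id: debug ID identifying the module taking the reference
 *
 * Increment the peer's overall reference count along with the
 * per-debug-ID access count used for reference tracking.
 *
 * Return: the updated access count for @dbg_id, or -EINVAL if @peer is
 * NULL or @dbg_id is out of range
 */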
int ol_txrx_peer_get_ref(struct ol_txrx_peer_t *peer,
			  enum peer_debug_id_type dbg_id)
{
	int refs_dbg_id;

	if (!peer) {
		ol_txrx_err("peer is null for ID %d", dbg_id);
		return -EINVAL;
	}

	if (dbg_id >= PEER_DEBUG_ID_MAX || dbg_id < 0) {
		ol_txrx_err("incorrect debug_id %d ", dbg_id);
		return -EINVAL;
	}

	qdf_atomic_inc(&peer->ref_cnt);
	qdf_atomic_inc(&peer->access_list[dbg_id]);
	refs_dbg_id = qdf_atomic_read(&peer->access_list[dbg_id]);

	return refs_dbg_id;
}

/*=== function definitions for peer MAC addr --> peer object hash table =====*/

/*
 * TXRX_PEER_HASH_LOAD_MULT / TXRX_PEER_HASH_LOAD_SHIFT:
 * Multiply the number of entries by 2 and divide by 2^0 (shift by 0),
 * then round up to a power of two.
 * This provides at least twice as many bins in the peer hash table
 * as there will be entries.
 * Having substantially more bins than entries minimizes the probability of
 * having to compare MAC addresses.
 * Because the MAC address comparison is fairly efficient, it is okay if the
 * hash table is sparsely loaded, but it's generally better to use extra mem
 * to keep the table sparse, to keep the lookups as fast as possible.
 * An optimization would be to apply a more conservative loading factor for
 * high latency, where the lookup happens during the tx classification of
 * every tx frame, than for low latency, where the lookup only happens
 * during association, when the PEER_MAP message is received.
 */
#define TXRX_PEER_HASH_LOAD_MULT  2
#define TXRX_PEER_HASH_LOAD_SHIFT 0

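/**
 * ol_txrx_peer_find_hash_attach() - allocate the MAC address hash table
 * @pdev: handle to the txrx pdev object
 *
 * Size the table to the next power of two above
 * (max peer ID + 1) * TXRX_PEER_HASH_LOAD_MULT bins and initialize each
 * bin's peer list.
 *
 * Illustrative sizing (the max-peer-ID value below is an assumed example,
 * not a configuration value from this driver): if ol_cfg_max_peer_id()
 * returned 32, then hash_elems = 33 * 2 = 66, which rounds up to
 * 2^7 = 128 bins, so peer_hash.mask = 127 and peer_hash.idx_bits = 7.
 *
 * Return: 0 on success, 1 on allocation failure
 */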
static int ol_txrx_peer_find_hash_attach(struct ol_txrx_pdev_t *pdev)
{
	int i, hash_elems, log2;

	/* allocate the peer MAC address -> peer object hash table */
	hash_elems = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
	hash_elems *= TXRX_PEER_HASH_LOAD_MULT;
	hash_elems >>= TXRX_PEER_HASH_LOAD_SHIFT;
	log2 = ol_txrx_log2_ceil(hash_elems);
	hash_elems = 1 << log2;

	pdev->peer_hash.mask = hash_elems - 1;
	pdev->peer_hash.idx_bits = log2;
	/* allocate an array of TAILQ peer object lists */
	pdev->peer_hash.bins =
		qdf_mem_malloc(hash_elems *
			       sizeof(TAILQ_HEAD(anonymous_tail_q,
						 ol_txrx_peer_t)));
	if (!pdev->peer_hash.bins)
		return 1;       /* failure */

	for (i = 0; i < hash_elems; i++)
		TAILQ_INIT(&pdev->peer_hash.bins[i]);

	return 0;               /* success */
}

static void ol_txrx_peer_find_hash_detach(struct ol_txrx_pdev_t *pdev)
{
	qdf_mem_free(pdev->peer_hash.bins);
}

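/**
 * ol_txrx_peer_find_hash_index() - compute the hash bin for a MAC address
 * @pdev: handle to the txrx pdev object
 * @mac_addr: aligned MAC address to hash
 *
 * XOR the three 16-bit words of the MAC address, fold the result down by
 * idx_bits and mask it into the bin range.
 *
 * Illustrative example (assuming little-endian 16-bit packing of the MAC
 * bytes and the 128-bin table from the sizing example above): for MAC
 * aa:bb:cc:dd:ee:ff, index = 0xbbaa ^ 0xddcc ^ 0xffee = 0x9988; folding
 * with index ^= index >> 7 gives 0x98bb, and masking with 0x7f selects
 * bin 59.
 *
 * Return: hash bin index for @mac_addr
 */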
static inline unsigned int
ol_txrx_peer_find_hash_index(struct ol_txrx_pdev_t *pdev,
			     union ol_txrx_align_mac_addr_t *mac_addr)
{
	unsigned int index;

	index =
		mac_addr->align2.bytes_ab ^
		mac_addr->align2.bytes_cd ^ mac_addr->align2.bytes_ef;
	index ^= index >> pdev->peer_hash.idx_bits;
	index &= pdev->peer_hash.mask;
	return index;
}

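/**
 * ol_txrx_peer_find_hash_add() - add a peer to the MAC address hash table
 * @pdev: handle to the txrx pdev object
 * @peer: peer object to add to its MAC address's hash bin
 */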
void
ol_txrx_peer_find_hash_add(struct ol_txrx_pdev_t *pdev,
			   struct ol_txrx_peer_t *peer)
{
	unsigned int index;

	index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	/*
	 * It is important to add the new peer at the tail of the peer list
	 * for this bin index.  Together with having the hash_find function
	 * search from head to tail, this ensures that if two entries with
	 * the same MAC address are stored, the one added first will be
	 * found first.
	 */
	TAILQ_INSERT_TAIL(&pdev->peer_hash.bins[index], peer, hash_list_elem);
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
}

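/**
 * ol_txrx_peer_vdev_find_hash() - find a peer by MAC address within a vdev
 * @pdev: handle to the txrx pdev object
 * @vdev: vdev the peer must belong to
 * @peer_mac_addr: MAC address to look up
 * @mac_addr_is_aligned: set if @peer_mac_addr already points to an aligned
 *	union ol_txrx_align_mac_addr_t
 * @check_valid: if non-zero, only return peers marked valid
 *
 * Return: the matching peer within @vdev with a reference taken, or NULL
 * if no match is found
 */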
struct ol_txrx_peer_t *ol_txrx_peer_vdev_find_hash(struct ol_txrx_pdev_t *pdev,
						   struct ol_txrx_vdev_t *vdev,
						   uint8_t *peer_mac_addr,
						   int mac_addr_is_aligned,
						   uint8_t check_valid)
{
	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct ol_txrx_peer_t *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
	} else {
		qdf_mem_copy(&local_mac_addr_aligned.raw[0],
			     peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
		if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
		    0 && (check_valid == 0 || peer->valid)
		    && peer->vdev == vdev) {
			/* found it */
			ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
	return NULL;            /* failure */
}

struct ol_txrx_peer_t *
	ol_txrx_peer_find_hash_find_get_ref
				(struct ol_txrx_pdev_t *pdev,
				uint8_t *peer_mac_addr,
				int mac_addr_is_aligned,
				u8 check_valid,
				enum peer_debug_id_type dbg_id)
{
	union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
	unsigned int index;
	struct ol_txrx_peer_t *peer;

	if (mac_addr_is_aligned) {
		mac_addr = (union ol_txrx_align_mac_addr_t *)peer_mac_addr;
	} else {
		qdf_mem_copy(&local_mac_addr_aligned.raw[0],
			     peer_mac_addr, QDF_MAC_ADDR_SIZE);
		mac_addr = &local_mac_addr_aligned;
	}
	index = ol_txrx_peer_find_hash_index(pdev, mac_addr);
	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
	TAILQ_FOREACH(peer, &pdev->peer_hash.bins[index], hash_list_elem) {
		if (ol_txrx_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) ==
		    0 && (check_valid == 0 || peer->valid)) {
			/* found it */
			ol_txrx_peer_get_ref(peer, dbg_id);
			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
			return peer;
		}
	}
	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
	return NULL;            /* failure */
}
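
/*
 * Typical lookup pattern for the function above (an illustrative sketch
 * only; the pdev handle, MAC address buffer and surrounding code are
 * placeholders, not taken from this file).  Every successful lookup must
 * be balanced by a matching ol_txrx_peer_release_ref() call using the
 * same debug ID:
 *
 *	struct ol_txrx_peer_t *peer;
 *
 *	peer = ol_txrx_peer_find_hash_find_get_ref(pdev, mac, 0, 1,
 *						    PEER_DEBUG_ID_OL_INTERNAL);
 *	if (peer) {
 *		// ... use the peer ...
 *		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
 *	}
 */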

void
ol_txrx_peer_find_hash_remove(struct ol_txrx_pdev_t *pdev,
			      struct ol_txrx_peer_t *peer)
{
	unsigned int index;

	index = ol_txrx_peer_find_hash_index(pdev, &peer->mac_addr);
	/*
	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
	 * by the caller.
	 * The caller needs to hold the lock from the time the peer object's
	 * reference count is decremented and tested up through the time the
	 * reference to the peer object is removed from the hash table, by
	 * this function.
	 * Holding the lock only while removing the peer object reference
	 * from the hash table keeps the hash table consistent, but does not
	 * protect against a new HL tx context starting to use the peer object
	 * if it looks up the peer object from its MAC address just after the
	 * peer ref count is decremented to zero, but just before the peer
	 * object reference is removed from the hash table.
	 */
	/* qdf_spin_lock_bh(&pdev->peer_ref_mutex); */
	TAILQ_REMOVE(&pdev->peer_hash.bins[index], peer, hash_list_elem);
	/* qdf_spin_unlock_bh(&pdev->peer_ref_mutex); */
}

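/**
 * ol_txrx_peer_find_hash_erase() - release every peer left in the hash table
 * @pdev: handle to the txrx pdev object
 *
 * For each peer still present in any hash bin, force its reference count
 * to one and release that reference so the peer object gets deleted.
 * Intended for use only when the pdev is being torn down and is no longer
 * in use.
 */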
void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev)
{
	unsigned int i;
	/*
	 * Not really necessary to take peer_ref_mutex lock - by this point,
	 * it's known that the pdev is no longer in use.
	 */

	for (i = 0; i <= pdev->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
			struct ol_txrx_peer_t *peer, *peer_next;

			/*
			 * TAILQ_FOREACH_SAFE must be used here to avoid any
			 * memory access violation after peer is freed
			 */
			TAILQ_FOREACH_SAFE(peer, &pdev->peer_hash.bins[i],
					   hash_list_elem, peer_next) {
				/*
				 * Don't remove the peer from the hash table -
				 * that would modify the list we are currently
				 * traversing, and it's not necessary anyway.
				 */
				/*
				 * Artificially adjust the peer's ref count to
				 * 1, so it will get deleted by
				 * ol_txrx_peer_release_ref.
				 */
				qdf_atomic_init(&peer->ref_cnt); /* set to 0 */
				ol_txrx_peer_get_ref(peer,
						     PEER_DEBUG_ID_OL_HASH_ERS);
				ol_txrx_peer_release_ref(peer,
						     PEER_DEBUG_ID_OL_HASH_ERS);
			}
		}
	}
}

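/**
 * ol_txrx_peer_free_inactive_list() - free all peers on the inactive list
 * @pdev: handle to the txrx pdev object
 *
 * Walk pdev->inactive_peer_list and free every peer object on it,
 * regardless of any outstanding del_ref_cnt references.
 */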
void ol_txrx_peer_free_inactive_list(struct ol_txrx_pdev_t *pdev)
{
	struct ol_txrx_peer_t *peer = NULL, *tmp;

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
	if (!TAILQ_EMPTY(&pdev->inactive_peer_list)) {
		TAILQ_FOREACH_SAFE(peer, &pdev->inactive_peer_list,
				   inactive_peer_list_elem, tmp) {
			qdf_atomic_init(&peer->del_ref_cnt); /* set to 0 */
			qdf_mem_free(peer);
		}
	}
	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
}

/*=== function definitions for peer id --> peer object map ==================*/

static int ol_txrx_peer_find_map_attach(struct ol_txrx_pdev_t *pdev)
{
	int max_peers, peer_map_size;

	/* allocate the peer ID -> peer object map */
	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
	peer_map_size = max_peers * sizeof(pdev->peer_id_to_obj_map[0]);
	pdev->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
	if (!pdev->peer_id_to_obj_map)
		return 1;       /* failure */

	return 0;               /* success */
}

static void ol_txrx_peer_find_map_detach(struct ol_txrx_pdev_t *pdev)
{
	qdf_mem_free(pdev->peer_id_to_obj_map);
}

/**
 * ol_txrx_peer_clear_map_peer() - Remove map entries that refer to a peer.
 * @pdev: pdev handle
 * @peer: peer for removing obj map entries
 *
 * Run through the entire peer_id_to_obj map and nullify all the entries
 * that map to a particular peer. Called before deleting the peer object.
 *
 * Return: None
 */
void ol_txrx_peer_clear_map_peer(ol_txrx_pdev_handle pdev,
				 struct ol_txrx_peer_t *peer)
{
	int max_peers;
	int i;

	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
	for (i = 0; i < max_peers; i++) {
		if (pdev->peer_id_to_obj_map[i].peer == peer) {
			/* Found a map entry for this peer, clear it. */
			pdev->peer_id_to_obj_map[i].peer = NULL;
		}
	}
	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
}

/*
 * ol_txrx_peer_find_add_id() - Add peer_id entry to peer
 *
 * @pdev: Handle to pdev object
 * @peer_mac_addr: MAC address of peer provided by firmware
 * @peer_id: peer_id provided by firmware
 *
 * Search for the peer object matching the MAC address, add the peer_id to
 * its array of peer_ids and update the peer_id_to_obj map entry
 * for that peer_id. Increment the corresponding reference counts.
 *
 * Riva/Pronto has one peer ID for each peer.
 * Peregrine/Rome has two peer IDs for each peer.
 * iHelium has up to three peer IDs for each peer.
 *
 * Return: None
 */
static inline void ol_txrx_peer_find_add_id(struct ol_txrx_pdev_t *pdev,
				uint8_t *peer_mac_addr, uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer;
	int status;
	int i;
	uint32_t peer_id_ref_cnt;
	uint32_t peer_ref_cnt;
	u8 check_valid = 0;

	if (pdev->enable_peer_unmap_conf_support)
		check_valid = 1;

	/* check if there's already a peer object with this MAC address */
	peer =
		ol_txrx_peer_find_hash_find_get_ref(pdev, peer_mac_addr,
						    1 /* is aligned */,
						    check_valid,
						    PEER_DEBUG_ID_OL_PEER_MAP);

	if (!peer || peer_id == HTT_INVALID_PEER) {
		/*
		 * Currently peer IDs are assigned for vdevs as well as peers.
		 * If the peer ID is for a vdev, then we will fail to find a
		 * peer with a matching MAC address.
		 */
		ol_txrx_err("peer not found or peer ID %d is invalid",
			    peer_id);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_MAP_EVENT,
				    peer_id, peer_mac_addr,
				    peer, 0, 0);

		return;
	}

	qdf_spin_lock(&pdev->peer_map_unmap_lock);

	/* peer's ref count was already incremented by
	 * peer_find_hash_find
	 */
	if (!pdev->peer_id_to_obj_map[peer_id].peer) {
		pdev->peer_id_to_obj_map[peer_id].peer = peer;
		qdf_atomic_init
		  (&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
	}
	qdf_atomic_inc
		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);

	status = 1;

	/* find a place in peer_id array and insert peer_id */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
			peer->peer_ids[i] = peer_id;
			status = 0;
			break;
		}
	}

	if (qdf_atomic_read(&peer->fw_create_pending) == 1)
		qdf_atomic_set(&peer->fw_create_pending, 0);

	qdf_spin_unlock(&pdev->peer_map_unmap_lock);

	peer_id_ref_cnt = qdf_atomic_read(&pdev->
				peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
	peer_ref_cnt = qdf_atomic_read(&peer->ref_cnt);
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
	   "%s: peer %pK ID %d peer_id[%d] peer_id_ref_cnt %d peer->ref_cnt %d",
	   __func__, peer, peer_id, i, peer_id_ref_cnt, peer_ref_cnt);
	wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
			    DEBUG_PEER_MAP_EVENT,
			    peer_id, &peer->mac_addr.raw, peer,
			    peer_id_ref_cnt,
			    peer_ref_cnt);

	if (status) {
		/* TBDXXX: assert for now */
		qdf_assert(0);
	}
}

/*=== allocation / deallocation function definitions ========================*/

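/**
 * ol_txrx_peer_find_attach() - allocate the peer lookup data structures
 * @pdev: handle to the txrx pdev object
 *
 * Allocate both the peer ID -> peer object map and the peer MAC address
 * -> peer object hash table, freeing the map again if the hash table
 * allocation fails.
 *
 * Return: 0 on success, 1 on allocation failure
 */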
int ol_txrx_peer_find_attach(struct ol_txrx_pdev_t *pdev)
{
	if (ol_txrx_peer_find_map_attach(pdev))
		return 1;
	if (ol_txrx_peer_find_hash_attach(pdev)) {
		ol_txrx_peer_find_map_detach(pdev);
		return 1;
	}
	return 0;               /* success */
}

void ol_txrx_peer_find_detach(struct ol_txrx_pdev_t *pdev)
{
	ol_txrx_peer_find_map_detach(pdev);
	ol_txrx_peer_find_hash_detach(pdev);
}

/**
 * ol_txrx_peer_unmap_conf_handler() - send peer unmap conf cmd to FW
 * @pdev: pdev_handle
 * @peer_id: peer_id
 *
 * Return: None
 */
static inline void
ol_txrx_peer_unmap_conf_handler(ol_txrx_pdev_handle pdev,
				uint16_t peer_id)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;

	if (peer_id == HTT_INVALID_PEER) {
		ol_txrx_err("invalid peer ID %d\n", peer_id);
		return;
	}

	qdf_atomic_inc(&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt);

	if (qdf_atomic_read(
		&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt) ==
		pdev->peer_id_unmap_ref_cnt) {
		ol_txrx_dbg("send unmap conf cmd: peer_id[%d] unmap_cnt[%d]",
			    peer_id, pdev->peer_id_unmap_ref_cnt);
		status = pdev->peer_unmap_sync_cb(
				DEBUG_INVALID_VDEV_ID,
				1, &peer_id);

		if (status == QDF_STATUS_SUCCESS ||
		    status == QDF_STATUS_E_BUSY) {
			qdf_atomic_init(
			&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt);
		} else {
			qdf_atomic_set(
			&pdev->peer_id_to_obj_map[peer_id].peer_id_unmap_cnt,
			OL_TXRX_INVALID_PEER_UNMAP_COUNT);
			ol_txrx_err("unable to send unmap conf cmd [%d]",
				    peer_id);
		}
	}
}

/*=== function definitions for message handling =============================*/

#if defined(CONFIG_HL_SUPPORT)

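/**
 * ol_rx_peer_map_handler() - handle a peer map event from the target
 * @pdev: handle to the txrx pdev object
 * @peer_id: peer ID assigned by the target
 * @vdev_id: ID of the vdev the peer belongs to
 * @peer_mac_addr: MAC address of the peer
 * @tx_ready: whether the target is ready to accept tx frames for the peer
 *
 * Record the peer ID in the peer object and in the peer_id_to_obj map.
 * If the target is not yet tx ready, re-initialize the paused mgmt tx
 * queue descriptors and unpause only the mgmt tx queue; data tx queues
 * stay paused until the association completes.
 */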
void
ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
		       uint16_t peer_id,
		       uint8_t vdev_id, uint8_t *peer_mac_addr, int tx_ready)
{
	ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
	if (!tx_ready) {
		struct ol_txrx_peer_t *peer;

		peer = ol_txrx_peer_find_by_id(pdev, peer_id);
		if (!peer) {
			/* ol_txrx_peer_detach called before peer map arrived */
			return;
		}

		/* walk through paused mgmt queue, update tx descriptors */
		ol_tx_queue_decs_reinit(peer, peer_id);

		/* keep non-mgmt tx queues paused until assoc is finished;
		 * tx queues were paused in ol_txrx_peer_attach
		 */
		/* unpause tx mgmt queue */
		ol_txrx_peer_tid_unpause(peer, HTT_TX_EXT_TID_MGMT);
	}
}


void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer;

	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
	if (peer) {
		int i;
		/*
		 * Unpause all data tx queues now that the target is ready.
		 * The mgmt tx queue was not paused, so skip it.
		 */
		for (i = 0; i < QDF_ARRAY_SIZE(peer->txqs); i++) {
			if (i == HTT_TX_EXT_TID_MGMT)
				continue; /* mgmt tx queue was not paused */

			ol_txrx_peer_tid_unpause(peer, i);
		}
	}
}
#else

void
ol_rx_peer_map_handler(ol_txrx_pdev_handle pdev,
		       uint16_t peer_id,
		       uint8_t vdev_id,
		       uint8_t *peer_mac_addr,
		       int tx_ready)
{
	ol_txrx_peer_find_add_id(pdev, peer_mac_addr, peer_id);
}

void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
}

#endif

/*
 * ol_rx_peer_unmap_handler() - Handle peer unmap event from firmware
 *
 * @pdev: Handle to pdev object
 * @peer_id: peer_id unmapped by firmware
 *
 * Decrement the reference count for the peer_id in peer_id_to_obj_map,
 * decrement the reference count in the corresponding peer object and clear
 * the entry in the peer's peer_ids array.
 * In case of unmap events for a peer that is already deleted, just decrement
 * del_peer_id_ref_cnt.
 *
 * Return: None
 */
void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
{
	struct ol_txrx_peer_t *peer;
	int i = 0;
	int32_t ref_cnt;
	int del_ref_cnt;

	if (peer_id == HTT_INVALID_PEER) {
		ol_txrx_err("invalid peer ID %d\n", peer_id);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_UNMAP_EVENT,
				    peer_id, NULL, NULL, 0, 0x100);
		return;
	}

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);

	/* send peer unmap conf cmd to fw for unmapped peer_ids */
	if (pdev->enable_peer_unmap_conf_support &&
	    pdev->peer_unmap_sync_cb)
		ol_txrx_peer_unmap_conf_handler(pdev, peer_id);

	if (qdf_atomic_read(
		&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt)) {
		/* This peer_id belongs to a peer already deleted */
		peer = pdev->peer_id_to_obj_map[peer_id].del_peer;
		if (qdf_atomic_dec_and_test
		    (&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt)) {
			pdev->peer_id_to_obj_map[peer_id].del_peer = NULL;
		}

		del_ref_cnt = qdf_atomic_read(&peer->del_ref_cnt);
		if (qdf_atomic_dec_and_test(&peer->del_ref_cnt)) {
			TAILQ_REMOVE(&pdev->inactive_peer_list, peer,
				     inactive_peer_list_elem);
			qdf_mem_free(peer);
		}
		del_ref_cnt--;

		ref_cnt = qdf_atomic_read(&pdev->peer_id_to_obj_map[peer_id].
							del_peer_id_ref_cnt);
		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_UNMAP_EVENT,
				    peer_id, NULL, NULL, ref_cnt, 0x101);
		ol_txrx_dbg("peer already deleted, peer_id %d del_ref_cnt:%d del_peer_id_ref_cnt %d",
			    peer_id, del_ref_cnt, ref_cnt);
		return;
	}
	peer = pdev->peer_id_to_obj_map[peer_id].peer;

	if (!peer) {
		/*
		 * Currently peer IDs are assigned for vdevs as well as peers.
		 * If the peer ID is for a vdev, then the peer pointer stored
		 * in peer_id_to_obj_map will be NULL.
		 */
		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
		ol_txrx_info("peer not found for peer_id %d", peer_id);
		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
				    DEBUG_PEER_UNMAP_EVENT,
				    peer_id, NULL, NULL, 0, 0x102);
		return;
	}

	if (qdf_atomic_dec_and_test
		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt)) {
		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
			if (peer->peer_ids[i] == peer_id) {
				peer->peer_ids[i] = HTT_INVALID_PEER;
				break;
			}
		}
	}

	ref_cnt = qdf_atomic_read
		(&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);

	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);

	wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
			    DEBUG_PEER_UNMAP_EVENT,
			    peer_id, &peer->mac_addr.raw, peer, ref_cnt,
			    qdf_atomic_read(&peer->ref_cnt));

	/*
	 * Remove a reference to the peer.
	 * If there are no more references, delete the peer object.
	 */
	ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: peer_id %d peer %pK peer_id_ref_cnt %d",
		  __func__, peer_id, peer, ref_cnt);
}

/**
 * ol_txrx_peer_remove_obj_map_entries() - Remove matching pdev peer map entries
 * @pdev: pdev handle
 * @peer: peer for removing obj map entries
 *
 * Saves peer_id_ref_cnt to a different field and removes the link
 * to peer object. It also decrements the peer reference count by
 * the number of references removed.
 *
 * Return: None
 */
void ol_txrx_peer_remove_obj_map_entries(ol_txrx_pdev_handle pdev,
					struct ol_txrx_peer_t *peer)
{
	int i;
	uint16_t peer_id;
	int32_t peer_id_ref_cnt;
	int32_t num_deleted_maps = 0;
	uint16_t save_peer_ids[MAX_NUM_PEER_ID_PER_PEER];
	uint16_t save_peer_id_ref_cnt[MAX_NUM_PEER_ID_PER_PEER] = {0};

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		peer_id = peer->peer_ids[i];
		save_peer_ids[i] = HTT_INVALID_PEER;
		if (peer_id == HTT_INVALID_PEER ||
			!pdev->peer_id_to_obj_map[peer_id].peer) {
			/* unused peer_id, or object is already dereferenced */
			continue;
		}
		if (pdev->peer_id_to_obj_map[peer_id].peer != peer) {
			QDF_TRACE(QDF_MODULE_ID_TXRX,
				QDF_TRACE_LEVEL_ERROR,
				FL("peer pointer mismatch in peer_id_to_obj"));
			continue;
		}
		peer_id_ref_cnt = qdf_atomic_read(
					&pdev->peer_id_to_obj_map[peer_id].
						peer_id_ref_cnt);
		save_peer_ids[i] = peer_id;
		save_peer_id_ref_cnt[i] = peer_id_ref_cnt;

		/*
		 * Transfer peer_id_ref_cnt into del_peer_id_ref_cnt so that
		 * ol_txrx_peer_release_ref will decrement del_peer_id_ref_cnt
		 * and any map events will increment peer_id_ref_cnt. Otherwise
		 * accounting will be messed up.
		 *
		 * Add operation will ensure that back to back roaming in the
		 * middle of unmap/map event sequence will be accounted for.
		 */
		qdf_atomic_add(peer_id_ref_cnt,
			&pdev->peer_id_to_obj_map[peer_id].del_peer_id_ref_cnt);
		qdf_atomic_init(&pdev->peer_id_to_obj_map[peer_id].
				peer_id_ref_cnt);
		num_deleted_maps += peer_id_ref_cnt;
		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
		pdev->peer_id_to_obj_map[peer_id].del_peer = peer;
		peer->peer_ids[i] = HTT_INVALID_PEER;
	}
	qdf_atomic_init(&peer->del_ref_cnt);
	if (num_deleted_maps != 0) {
		qdf_atomic_add(num_deleted_maps, &peer->del_ref_cnt);
		TAILQ_INSERT_TAIL(&pdev->inactive_peer_list, peer,
				  inactive_peer_list_elem);
	}
	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);

	/* Debug print the information after releasing bh spinlock */
	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
		if (save_peer_ids[i] == HTT_INVALID_PEER)
			continue;
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
			  FL("peer_id = %d, peer_id_ref_cnt = %d, index = %d"),
			  save_peer_ids[i], save_peer_id_ref_cnt[i], i);
	}

	if (num_deleted_maps > qdf_atomic_read(&peer->ref_cnt)) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  FL("num_deleted_maps %d ref_cnt %d"),
			  num_deleted_maps, qdf_atomic_read(&peer->ref_cnt));
		QDF_BUG(0);
		return;
	}

	while (num_deleted_maps-- > 0)
		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_PEER_MAP);
}

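/**
 * ol_txrx_assoc_peer_find() - find the vdev's association peer
 * @vdev: vdev whose last real (association) peer is wanted
 *
 * Return: the vdev's last_real_peer with a reference taken, if it is
 * valid and an HTT peer ID has been assigned to it; otherwise NULL
 */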
struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
{
	struct ol_txrx_peer_t *peer;

	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
	/*
	 * Check that the txrx peer is itself valid and that an HTT peer ID
	 * has been set up for this peer.
	 */
	if (vdev->last_real_peer
	    && vdev->last_real_peer->peer_ids[0] != HTT_INVALID_PEER_ID) {
		qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
		ol_txrx_peer_get_ref(vdev->last_real_peer,
				     PEER_DEBUG_ID_OL_INTERNAL);
		qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
		peer = vdev->last_real_peer;
	} else {
		peer = NULL;
	}
	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
	return peer;
}

/*=== function definitions for debug ========================================*/

#if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
void ol_txrx_peer_find_display(ol_txrx_pdev_handle pdev, int indent)
{
	int i, max_peers;

	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "%*speer map:\n", indent, " ");
	max_peers = ol_cfg_max_peer_id(pdev->ctrl_pdev) + 1;
	for (i = 0; i < max_peers; i++) {
		if (pdev->peer_id_to_obj_map[i].peer) {
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
				  "%*sid %d -> %pK\n",
				  indent + 4, " ", i,
				  pdev->peer_id_to_obj_map[i].peer);
		}
	}
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
		  "%*speer hash table:\n", indent, " ");
	for (i = 0; i <= pdev->peer_hash.mask; i++) {
		if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
			struct ol_txrx_peer_t *peer;

			TAILQ_FOREACH(peer, &pdev->peer_hash.bins[i],
				      hash_list_elem) {
				QDF_TRACE(QDF_MODULE_ID_TXRX,
					  QDF_TRACE_LEVEL_INFO_LOW,
					  "%*shash idx %d -> %pK ("QDF_MAC_ADDR_FMT")\n",
					indent + 4, " ", i, peer,
					QDF_MAC_ADDR_REF(peer->mac_addr.raw));
			}
		}
	}
}

#endif /* if TXRX_DEBUG_LEVEL */