xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_desc.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #include "dp_types.h"
21*5113495bSYour Name #include "dp_rx.h"
22*5113495bSYour Name #include "dp_ipa.h"
23*5113495bSYour Name #include <qdf_module.h>
24*5113495bSYour Name 
25*5113495bSYour Name #ifdef RX_DESC_MULTI_PAGE_ALLOC
26*5113495bSYour Name A_COMPILE_TIME_ASSERT(cookie_size_check,
27*5113495bSYour Name 		      (DP_BLOCKMEM_SIZE /
28*5113495bSYour Name 		       sizeof(union dp_rx_desc_list_elem_t))
29*5113495bSYour Name 		      <= (1 << DP_RX_DESC_PAGE_ID_SHIFT));
30*5113495bSYour Name 
dp_rx_desc_pool_is_allocated(struct rx_desc_pool * rx_desc_pool)31*5113495bSYour Name QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
32*5113495bSYour Name {
33*5113495bSYour Name 	if (!rx_desc_pool->desc_pages.num_pages) {
34*5113495bSYour Name 		dp_err("Multi page alloc fail, size=%d, elem=%d",
35*5113495bSYour Name 		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
36*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;
37*5113495bSYour Name 	}
38*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
39*5113495bSYour Name }
40*5113495bSYour Name 
41*5113495bSYour Name qdf_export_symbol(dp_rx_desc_pool_is_allocated);
42*5113495bSYour Name 
dp_rx_desc_pool_alloc(struct dp_soc * soc,uint32_t num_elem,struct rx_desc_pool * rx_desc_pool)43*5113495bSYour Name QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
44*5113495bSYour Name 				 uint32_t num_elem,
45*5113495bSYour Name 				 struct rx_desc_pool *rx_desc_pool)
46*5113495bSYour Name {
47*5113495bSYour Name 	uint32_t desc_size;
48*5113495bSYour Name 	union dp_rx_desc_list_elem_t *rx_desc_elem;
49*5113495bSYour Name 
50*5113495bSYour Name 	desc_size = sizeof(*rx_desc_elem);
51*5113495bSYour Name 	rx_desc_pool->elem_size = desc_size;
52*5113495bSYour Name 	rx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
53*5113495bSYour Name 	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
54*5113495bSYour Name 				      &rx_desc_pool->desc_pages,
55*5113495bSYour Name 				      desc_size, num_elem, 0, true);
56*5113495bSYour Name 	if (!rx_desc_pool->desc_pages.num_pages) {
57*5113495bSYour Name 		qdf_err("Multi page alloc fail,size=%d, elem=%d",
58*5113495bSYour Name 			desc_size, num_elem);
59*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;
60*5113495bSYour Name 	}
61*5113495bSYour Name 
62*5113495bSYour Name 	if (qdf_mem_multi_page_link(soc->osdev,
63*5113495bSYour Name 				    &rx_desc_pool->desc_pages,
64*5113495bSYour Name 				    desc_size, num_elem, true)) {
65*5113495bSYour Name 		qdf_err("overflow num link,size=%d, elem=%d",
66*5113495bSYour Name 			desc_size, num_elem);
67*5113495bSYour Name 		goto free_rx_desc_pool;
68*5113495bSYour Name 	}
69*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
70*5113495bSYour Name 
71*5113495bSYour Name free_rx_desc_pool:
72*5113495bSYour Name 	dp_rx_desc_pool_free(soc, rx_desc_pool);
73*5113495bSYour Name 
74*5113495bSYour Name 	return QDF_STATUS_E_FAULT;
75*5113495bSYour Name }
76*5113495bSYour Name 
77*5113495bSYour Name qdf_export_symbol(dp_rx_desc_pool_alloc);
78*5113495bSYour Name 
dp_rx_desc_pool_init_generic(struct dp_soc * soc,struct rx_desc_pool * rx_desc_pool,uint32_t pool_id)79*5113495bSYour Name QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
80*5113495bSYour Name 				  struct rx_desc_pool *rx_desc_pool,
81*5113495bSYour Name 				  uint32_t pool_id)
82*5113495bSYour Name {
83*5113495bSYour Name 	uint32_t id, page_id, offset, num_desc_per_page;
84*5113495bSYour Name 	uint32_t count = 0;
85*5113495bSYour Name 	union dp_rx_desc_list_elem_t *rx_desc_elem;
86*5113495bSYour Name 
87*5113495bSYour Name 	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
88*5113495bSYour Name 
89*5113495bSYour Name 	rx_desc_elem = rx_desc_pool->freelist;
90*5113495bSYour Name 	while (rx_desc_elem) {
91*5113495bSYour Name 		page_id = count / num_desc_per_page;
92*5113495bSYour Name 		offset = count % num_desc_per_page;
93*5113495bSYour Name 		/*
94*5113495bSYour Name 		 * Below cookie size is from REO destination ring
95*5113495bSYour Name 		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie
96*5113495bSYour Name 		 * cookie size = 21 bits
97*5113495bSYour Name 		 * 8 bits - offset
98*5113495bSYour Name 		 * 8 bits - page ID
99*5113495bSYour Name 		 * 4 bits - pool ID
100*5113495bSYour Name 		 */
101*5113495bSYour Name 		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
102*5113495bSYour Name 		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
103*5113495bSYour Name 		      offset);
104*5113495bSYour Name 		rx_desc_elem->rx_desc.cookie = id;
105*5113495bSYour Name 		rx_desc_elem->rx_desc.pool_id = pool_id;
106*5113495bSYour Name 		rx_desc_elem->rx_desc.in_use = 0;
107*5113495bSYour Name 		rx_desc_elem = rx_desc_elem->next;
108*5113495bSYour Name 		count++;
109*5113495bSYour Name 	}
110*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
111*5113495bSYour Name }
112*5113495bSYour Name 
/**
 * dp_rx_desc_pool_init() - initialize a multi-page RX descriptor pool
 * @soc: soc handle providing the arch-specific init hook
 * @pool_id: identifier for this pool
 * @pool_size: number of descriptors in the pool
 * @rx_desc_pool: pool whose pages were allocated by
 *		  dp_rx_desc_pool_alloc()
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS ret;

	/* Create the pool lock before any use of the pool */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* Freelist head is the first element of the first cacheable page */
	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	ret = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool, pool_id);
	if (!QDF_IS_STATUS_SUCCESS(ret))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);
136*5113495bSYour Name 
/**
 * dp_rx_desc_find() - locate a descriptor element by page and offset
 * @page_id: index of the backing page
 * @offset: element index within that page
 * @rx_desc_pool: pool owning the pages
 *
 * Return: pointer to the descriptor list element at that position
 */
union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	uint8_t *page_base =
		rx_desc_pool->desc_pages.cacheable_pages[page_id];

	/* Byte-wise advance to the requested element within the page */
	return (union dp_rx_desc_list_elem_t *)
		(page_base + (size_t)rx_desc_pool->elem_size * offset);
}
143*5113495bSYour Name 
dp_rx_desc_nbuf_collect(struct dp_soc * soc,struct rx_desc_pool * rx_desc_pool,qdf_nbuf_t * nbuf_unmap_list,qdf_nbuf_t * nbuf_free_list)144*5113495bSYour Name static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
145*5113495bSYour Name 					  struct rx_desc_pool *rx_desc_pool,
146*5113495bSYour Name 					  qdf_nbuf_t *nbuf_unmap_list,
147*5113495bSYour Name 					  qdf_nbuf_t *nbuf_free_list)
148*5113495bSYour Name {
149*5113495bSYour Name 	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
150*5113495bSYour Name 	union dp_rx_desc_list_elem_t *rx_desc_elem;
151*5113495bSYour Name 	struct dp_rx_desc *rx_desc;
152*5113495bSYour Name 
153*5113495bSYour Name 	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
154*5113495bSYour Name 		qdf_err("No pages found on this desc pool");
155*5113495bSYour Name 		return QDF_STATUS_E_INVAL;
156*5113495bSYour Name 	}
157*5113495bSYour Name 	num_desc = rx_desc_pool->pool_size;
158*5113495bSYour Name 	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
159*5113495bSYour Name 	for (i = 0; i < num_desc; i++) {
160*5113495bSYour Name 		page_id = i / num_desc_per_page;
161*5113495bSYour Name 		offset = i % num_desc_per_page;
162*5113495bSYour Name 		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
163*5113495bSYour Name 		rx_desc = &rx_desc_elem->rx_desc;
164*5113495bSYour Name 		dp_rx_desc_free_dbg_info(rx_desc);
165*5113495bSYour Name 		if (rx_desc->in_use) {
166*5113495bSYour Name 			if (!rx_desc->unmapped) {
167*5113495bSYour Name 				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
168*5113495bSYour Name 						  rx_desc->nbuf);
169*5113495bSYour Name 				rx_desc->unmapped = 1;
170*5113495bSYour Name 			} else {
171*5113495bSYour Name 				DP_RX_HEAD_APPEND(*nbuf_free_list,
172*5113495bSYour Name 						  rx_desc->nbuf);
173*5113495bSYour Name 			}
174*5113495bSYour Name 		}
175*5113495bSYour Name 	}
176*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
177*5113495bSYour Name }
178*5113495bSYour Name 
/**
 * dp_rx_desc_nbuf_cleanup() - unmap and free the nbufs collected by
 *			       dp_rx_desc_nbuf_collect()
 * @soc: soc handle
 * @nbuf_unmap_list: buffers that still hold DMA/SMMU mappings
 * @nbuf_free_list: buffers that are already unmapped
 * @buf_size: mapped length of each buffer
 * @is_mon_pool: true for monitor pools (skips the audio SMMU unmap)
 */
static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size,
				    bool is_mon_pool)
{
	qdf_nbuf_t cur, nxt;

	/* First list: unmap (audio SMMU, IPA SMMU, DMA), then free */
	for (cur = nbuf_unmap_list; cur; cur = nxt) {
		nxt = cur->next;

		if (!is_mon_pool)
			dp_audio_smmu_unmap(soc->osdev,
					    QDF_NBUF_CB_PADDR(cur),
					    buf_size);

		if (dp_ipa_handle_rx_buf_smmu_mapping(soc, cur, buf_size,
						      false, __func__,
						      __LINE__))
			dp_info_rl("Unable to unmap nbuf: %pK", cur);

		qdf_nbuf_unmap_nbytes_single(soc->osdev, cur,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		dp_rx_nbuf_free(cur);
	}

	/* Second list: nothing mapped, just release the buffers */
	for (cur = nbuf_free_list; cur; cur = nxt) {
		nxt = cur->next;
		dp_rx_nbuf_free(cur);
	}
}
215*5113495bSYour Name 
/**
 * dp_rx_desc_nbuf_and_pool_free() - release all in-use nbufs of a
 *				     multi-page pool and destroy its lock
 * @soc: soc handle
 * @pool_id: pool identifier (unused in the multi-page variant)
 * @rx_desc_pool: pool to drain
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t unmap_list = NULL;
	qdf_nbuf_t free_list = NULL;

	/* Collect under the lock; unmap/free outside of it */
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&unmap_list, &free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	dp_rx_desc_nbuf_cleanup(soc, unmap_list, free_list,
				rx_desc_pool->buf_size, false);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
230*5113495bSYour Name 
/**
 * dp_rx_desc_nbuf_free() - release all in-use nbufs of a multi-page
 *			    pool without destroying the pool lock
 * @soc: soc handle
 * @rx_desc_pool: pool to drain
 * @is_mon_pool: true for monitor pools (forwarded to the cleanup step)
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t unmap_list = NULL;
	qdf_nbuf_t free_list = NULL;

	/* Collect under the lock; unmap/free outside of it */
	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&unmap_list, &free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);

	dp_rx_desc_nbuf_cleanup(soc, unmap_list, free_list,
				rx_desc_pool->buf_size, is_mon_pool);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);
246*5113495bSYour Name 
dp_rx_desc_pool_free(struct dp_soc * soc,struct rx_desc_pool * rx_desc_pool)247*5113495bSYour Name void dp_rx_desc_pool_free(struct dp_soc *soc,
248*5113495bSYour Name 			  struct rx_desc_pool *rx_desc_pool)
249*5113495bSYour Name {
250*5113495bSYour Name 	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
251*5113495bSYour Name 		return;
252*5113495bSYour Name 
253*5113495bSYour Name 	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
254*5113495bSYour Name 				     &rx_desc_pool->desc_pages, 0, true);
255*5113495bSYour Name }
256*5113495bSYour Name 
257*5113495bSYour Name qdf_export_symbol(dp_rx_desc_pool_free);
258*5113495bSYour Name 
/**
 * dp_rx_desc_pool_deinit() - deinitialize a multi-page RX descriptor pool
 * @soc: soc handle providing the arch-specific deinit hook
 * @rx_desc_pool: pool to deinitialize
 * @pool_id: pool identifier forwarded to the arch hook
 *
 * Clears the freelist and size, resets the monitor-dest frag flag,
 * drains the frag prefetch cache, invokes the arch hook, and finally
 * destroys the pool lock. Backing memory is NOT freed here; that is
 * dp_rx_desc_pool_free()'s job.
 */
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon dest frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	qdf_frag_cache_drain(&rx_desc_pool->pf_cache);

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	/* Safe to destroy: pool is now empty and no longer usable */
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
279*5113495bSYour Name #else
dp_rx_desc_pool_is_allocated(struct rx_desc_pool * rx_desc_pool)280*5113495bSYour Name QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
281*5113495bSYour Name {
282*5113495bSYour Name 	if (!rx_desc_pool->array) {
283*5113495bSYour Name 		dp_err("nss-wifi<4> skip Rx refil");
284*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;
285*5113495bSYour Name 	}
286*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
287*5113495bSYour Name }
288*5113495bSYour Name 
289*5113495bSYour Name qdf_export_symbol(dp_rx_desc_pool_is_allocated);
290*5113495bSYour Name 
dp_rx_desc_pool_alloc(struct dp_soc * soc,uint32_t pool_size,struct rx_desc_pool * rx_desc_pool)291*5113495bSYour Name QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
292*5113495bSYour Name 				 uint32_t pool_size,
293*5113495bSYour Name 				 struct rx_desc_pool *rx_desc_pool)
294*5113495bSYour Name {
295*5113495bSYour Name 	rx_desc_pool->array = qdf_mem_common_alloc(pool_size *
296*5113495bSYour Name 				     sizeof(union dp_rx_desc_list_elem_t));
297*5113495bSYour Name 
298*5113495bSYour Name 	if (!(rx_desc_pool->array)) {
299*5113495bSYour Name 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
300*5113495bSYour Name 			  "RX Desc Pool allocation failed");
301*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;
302*5113495bSYour Name 	}
303*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
304*5113495bSYour Name }
305*5113495bSYour Name 
306*5113495bSYour Name qdf_export_symbol(dp_rx_desc_pool_alloc);
307*5113495bSYour Name 
dp_rx_desc_pool_init_generic(struct dp_soc * soc,struct rx_desc_pool * rx_desc_pool,uint32_t pool_id)308*5113495bSYour Name QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
309*5113495bSYour Name 				  struct rx_desc_pool *rx_desc_pool,
310*5113495bSYour Name 				  uint32_t pool_id)
311*5113495bSYour Name {
312*5113495bSYour Name 	int i;
313*5113495bSYour Name 
314*5113495bSYour Name 	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
315*5113495bSYour Name 		if (i == rx_desc_pool->pool_size - 1)
316*5113495bSYour Name 			rx_desc_pool->array[i].next = NULL;
317*5113495bSYour Name 		else
318*5113495bSYour Name 			rx_desc_pool->array[i].next =
319*5113495bSYour Name 				&rx_desc_pool->array[i + 1];
320*5113495bSYour Name 		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
321*5113495bSYour Name 		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
322*5113495bSYour Name 		rx_desc_pool->array[i].rx_desc.in_use = 0;
323*5113495bSYour Name 	}
324*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
325*5113495bSYour Name }
326*5113495bSYour Name 
/**
 * dp_rx_desc_pool_init() - initialize a single-block RX descriptor pool
 * @soc: soc handle providing the arch-specific init hook
 * @pool_id: identifier for this pool
 * @pool_size: number of descriptors in the pool
 * @rx_desc_pool: pool whose array was allocated by
 *		  dp_rx_desc_pool_alloc()
 */
void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	/*
	 * Zero the whole descriptor array. The previous code passed only
	 * pool_size (an element count) as the byte length, so all but
	 * the first pool_size BYTES of the array were left uncleared;
	 * the array is pool_size * sizeof(union dp_rx_desc_list_elem_t)
	 * bytes (see dp_rx_desc_pool_alloc()).
	 */
	qdf_mem_zero(rx_desc_pool->freelist,
		     pool_size * sizeof(union dp_rx_desc_list_elem_t));

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);
351*5113495bSYour Name 
352*5113495bSYour Name #ifdef WLAN_SUPPORT_PPEDS
353*5113495bSYour Name static inline
dp_rx_desc_get_nbuf(struct rx_desc_pool * rx_desc_pool,int i)354*5113495bSYour Name qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
355*5113495bSYour Name {
356*5113495bSYour Name 	if (rx_desc_pool->array[i].rx_desc.has_reuse_nbuf)
357*5113495bSYour Name 		return rx_desc_pool->array[i].rx_desc.reuse_nbuf;
358*5113495bSYour Name 	else
359*5113495bSYour Name 		return rx_desc_pool->array[i].rx_desc.nbuf;
360*5113495bSYour Name }
361*5113495bSYour Name #else
362*5113495bSYour Name static inline
dp_rx_desc_get_nbuf(struct rx_desc_pool * rx_desc_pool,int i)363*5113495bSYour Name qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
364*5113495bSYour Name {
365*5113495bSYour Name 	return rx_desc_pool->array[i].rx_desc.nbuf;
366*5113495bSYour Name }
367*5113495bSYour Name #endif
368*5113495bSYour Name 
/**
 * dp_rx_desc_nbuf_and_pool_free() - release all in-use nbufs, free the
 *				     descriptor array, destroy the lock
 * @soc: soc handle
 * @pool_id: pool identifier (unused in this variant)
 * @rx_desc_pool: pool to tear down
 */
void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	struct dp_rx_desc *desc;
	qdf_nbuf_t nbuf;
	int idx;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (idx = 0; idx < rx_desc_pool->pool_size; idx++) {
		desc = &rx_desc_pool->array[idx].rx_desc;
		if (!desc->in_use)
			continue;

		nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, idx);
		/* Unmap before freeing if the buffer is still mapped */
		if (!desc->unmapped) {
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
			desc->unmapped = 1;
		}
		dp_rx_nbuf_free(nbuf);
	}
	qdf_mem_common_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}
391*5113495bSYour Name 
/**
 * dp_rx_desc_nbuf_free() - release all in-use nbufs of a single-block
 *			    pool without freeing the descriptor array
 * @soc: soc handle
 * @rx_desc_pool: pool to drain
 * @is_mon_pool: monitor-pool flag (unused in this variant)
 */
void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	struct dp_rx_desc *desc;
	qdf_nbuf_t nbuf;
	int idx;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (idx = 0; idx < rx_desc_pool->pool_size; idx++) {
		desc = &rx_desc_pool->array[idx].rx_desc;
		dp_rx_desc_free_dbg_info(desc);
		if (!desc->in_use)
			continue;

		nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, idx);
		/* Unmap before freeing if the buffer is still mapped */
		if (!desc->unmapped) {
			dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
			desc->unmapped = 1;
		}
		dp_rx_nbuf_free(nbuf);
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);
416*5113495bSYour Name 
417*5113495bSYour Name #ifdef DP_RX_MON_MEM_FRAG
dp_rx_desc_frag_free(struct dp_soc * soc,struct rx_desc_pool * rx_desc_pool)418*5113495bSYour Name void dp_rx_desc_frag_free(struct dp_soc *soc,
419*5113495bSYour Name 			  struct rx_desc_pool *rx_desc_pool)
420*5113495bSYour Name {
421*5113495bSYour Name 	qdf_dma_addr_t paddr;
422*5113495bSYour Name 	qdf_frag_t vaddr;
423*5113495bSYour Name 	int i;
424*5113495bSYour Name 
425*5113495bSYour Name 	qdf_spin_lock_bh(&rx_desc_pool->lock);
426*5113495bSYour Name 	for (i = 0; i < rx_desc_pool->pool_size; i++) {
427*5113495bSYour Name 		if (rx_desc_pool->array[i].rx_desc.in_use) {
428*5113495bSYour Name 			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
429*5113495bSYour Name 			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;
430*5113495bSYour Name 
431*5113495bSYour Name 			dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
432*5113495bSYour Name 			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
433*5113495bSYour Name 				qdf_mem_unmap_page(soc->osdev, paddr,
434*5113495bSYour Name 						   rx_desc_pool->buf_size,
435*5113495bSYour Name 						   QDF_DMA_FROM_DEVICE);
436*5113495bSYour Name 				rx_desc_pool->array[i].rx_desc.unmapped = 1;
437*5113495bSYour Name 			}
438*5113495bSYour Name 			qdf_frag_free(vaddr);
439*5113495bSYour Name 		}
440*5113495bSYour Name 	}
441*5113495bSYour Name 	qdf_spin_unlock_bh(&rx_desc_pool->lock);
442*5113495bSYour Name }
443*5113495bSYour Name 
444*5113495bSYour Name qdf_export_symbol(dp_rx_desc_frag_free);
445*5113495bSYour Name #endif
446*5113495bSYour Name 
dp_rx_desc_pool_free(struct dp_soc * soc,struct rx_desc_pool * rx_desc_pool)447*5113495bSYour Name void dp_rx_desc_pool_free(struct dp_soc *soc,
448*5113495bSYour Name 			  struct rx_desc_pool *rx_desc_pool)
449*5113495bSYour Name {
450*5113495bSYour Name 	qdf_mem_common_free(rx_desc_pool->array);
451*5113495bSYour Name }
452*5113495bSYour Name 
453*5113495bSYour Name qdf_export_symbol(dp_rx_desc_pool_free);
454*5113495bSYour Name 
/**
 * dp_rx_desc_pool_deinit() - deinitialize a single-block RX descriptor
 *			      pool
 * @soc: soc handle providing the arch-specific deinit hook
 * @rx_desc_pool: pool to deinitialize
 * @pool_id: pool identifier forwarded to the arch hook
 *
 * No-op when the pool size is zero (never initialized or already torn
 * down); otherwise clears the freelist, resets the monitor-dest frag
 * flag, drains the frag cache, calls the arch hook, and destroys the
 * pool lock.
 */
void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	if (!rx_desc_pool->pool_size)
		return;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon dest frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	qdf_frag_cache_drain(&rx_desc_pool->pf_cache);

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
478*5113495bSYour Name 
479*5113495bSYour Name #endif /* RX_DESC_MULTI_PAGE_ALLOC */
480*5113495bSYour Name 
/**
 * dp_rx_desc_pool_deinit_generic() - default arch hook for pool deinit
 * @soc: soc handle (unused)
 * @rx_desc_pool: pool being deinitialized (unused)
 * @pool_id: pool identifier (unused)
 *
 * Intentionally empty: architectures with no extra per-pool state plug
 * this no-op into soc->arch_ops.dp_rx_desc_pool_deinit.
 */
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}
486*5113495bSYour Name 
/**
 * dp_rx_get_free_desc_list() - detach up to @num_descs descriptors from
 *				the pool's freelist
 * @soc: soc handle (unused)
 * @pool_id: pool identifier (unused; the pool is passed directly)
 * @rx_desc_pool: pool to take descriptors from
 * @num_descs: number of descriptors requested
 * @desc_list: out - head of the detached list
 * @tail: out - last element of the detached list
 *
 * Return: number of descriptors actually obtained (may be fewer than
 *	   requested if the freelist runs out)
 */
uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		/* Freelist exhausted: return the partial list; its last
		 * element's next is already NULL.
		 */
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}

	/*
	 * Terminate the detached list only if something was taken.
	 * With num_descs == 0, *tail still aliases the freelist head, so
	 * the previous unconditional '(*tail)->next = NULL' truncated the
	 * freelist (or dereferenced NULL on an empty pool).
	 */
	if (count)
		(*tail)->next = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);
514*5113495bSYour Name 
/**
 * dp_rx_add_desc_list_to_free_list() - splice a caller-held descriptor
 *					list back onto the pool freelist
 * @soc: soc handle (unused)
 * @local_desc_list: in/out - head of the caller's list; NULLed on return
 * @tail: in/out - last element of the caller's list; NULLed on return
 * @pool_id: pool identifier (unused; the pool is passed directly)
 * @rx_desc_pool: pool receiving the descriptors
 */
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *old_head;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	old_head = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
	"temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
	old_head, *local_desc_list, *tail, (*tail)->next);

	/* Prepend the caller's list in front of the current freelist */
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = old_head;

	/* Ownership transferred; clear the caller's references */
	*local_desc_list = NULL;
	*tail = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}
537*5113495bSYour Name 
538*5113495bSYour Name qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
539