/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "dp_types.h"
#include "dp_rx.h"
#include "dp_ipa.h"
#include <qdf_module.h>

#ifdef RX_DESC_MULTI_PAGE_ALLOC
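/*
 * The cookie layout used below keeps the per-page element offset in the
 * bits under DP_RX_DESC_PAGE_ID_SHIFT, so every element of a
 * DP_BLOCKMEM_SIZE page must be addressable within that field; the
 * compile-time assert checks exactly that.
 */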
A_COMPILE_TIME_ASSERT(cookie_size_check,
		      (DP_BLOCKMEM_SIZE /
		       sizeof(union dp_rx_desc_list_elem_t))
		      <= (1 << DP_RX_DESC_PAGE_ID_SHIFT));

QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->desc_pages.num_pages) {
		dp_err("Multi page alloc fail, size=%d, elem=%d",
		       rx_desc_pool->elem_size, rx_desc_pool->pool_size);
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t num_elem,
				 struct rx_desc_pool *rx_desc_pool)
{
	uint32_t desc_size;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	rx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
	dp_desc_multi_pages_mem_alloc(soc, rx_desc_pool->desc_type,
				      &rx_desc_pool->desc_pages,
				      desc_size, num_elem, 0, true);
	if (!rx_desc_pool->desc_pages.num_pages) {
		qdf_err("Multi page alloc fail, size=%d, elem=%d",
			desc_size, num_elem);
		return QDF_STATUS_E_NOMEM;
	}

	if (qdf_mem_multi_page_link(soc->osdev,
				    &rx_desc_pool->desc_pages,
				    desc_size, num_elem, true)) {
		qdf_err("overflow num link, size=%d, elem=%d",
			desc_size, num_elem);
		goto free_rx_desc_pool;
	}
	return QDF_STATUS_SUCCESS;

free_rx_desc_pool:
	dp_rx_desc_pool_free(soc, rx_desc_pool);

	return QDF_STATUS_E_FAULT;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

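/*
 * Typical pool lifecycle, as suggested by the functions in this file
 * (a sketch, not a mandated order): dp_rx_desc_pool_alloc() ->
 * dp_rx_desc_pool_init() -> buffers attached and used ->
 * dp_rx_desc_nbuf_free() -> dp_rx_desc_pool_deinit() ->
 * dp_rx_desc_pool_free(), or dp_rx_desc_nbuf_and_pool_free() for the
 * combined teardown.
 */
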
QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
				  struct rx_desc_pool *rx_desc_pool,
				  uint32_t pool_id)
{
	uint32_t id, page_id, offset, num_desc_per_page;
	uint32_t count = 0;
	union dp_rx_desc_list_elem_t *rx_desc_elem;

	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;

	rx_desc_elem = rx_desc_pool->freelist;
	while (rx_desc_elem) {
		page_id = count / num_desc_per_page;
		offset = count % num_desc_per_page;
		/*
		 * The cookie is written to the REO destination ring field
		 * reo_destination_ring -> buffer_addr_info -> sw_buffer_cookie,
		 * which is 21 bits wide; 20 bits are used here:
		 * 8 bits - offset
		 * 8 bits - page ID
		 * 4 bits - pool ID
		 */
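		/*
		 * Worked example (values illustrative): with the layout
		 * above, pool_id 1, page_id 2, offset 3 encode to
		 * id = (1 << 16) | (2 << 8) | 3 = 0x10203, assuming
		 * DP_RX_DESC_POOL_ID_SHIFT is 16 and
		 * DP_RX_DESC_PAGE_ID_SHIFT is 8.
		 */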
		id = ((pool_id << DP_RX_DESC_POOL_ID_SHIFT) |
		      (page_id << DP_RX_DESC_PAGE_ID_SHIFT) |
		      offset);
		rx_desc_elem->rx_desc.cookie = id;
		rx_desc_elem->rx_desc.pool_id = pool_id;
		rx_desc_elem->rx_desc.in_use = 0;
		rx_desc_elem = rx_desc_elem->next;
		count++;
	}
	return QDF_STATUS_SUCCESS;
}

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	rx_desc_pool->freelist = (union dp_rx_desc_list_elem_t *)
				  *rx_desc_pool->desc_pages.cacheable_pages;

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

union dp_rx_desc_list_elem_t *dp_rx_desc_find(uint16_t page_id, uint16_t offset,
					      struct rx_desc_pool *rx_desc_pool)
{
	return rx_desc_pool->desc_pages.cacheable_pages[page_id] +
		rx_desc_pool->elem_size * offset;
}
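
/*
 * Illustrative decode (a sketch based on the cookie layout documented in
 * dp_rx_desc_pool_init_generic(), assuming 8-bit offset and page-ID
 * fields):
 *
 *	page_id = (cookie >> DP_RX_DESC_PAGE_ID_SHIFT) & 0xff;
 *	offset = cookie & ((1 << DP_RX_DESC_PAGE_ID_SHIFT) - 1);
 *	rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
 */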

static QDF_STATUS dp_rx_desc_nbuf_collect(struct dp_soc *soc,
					  struct rx_desc_pool *rx_desc_pool,
					  qdf_nbuf_t *nbuf_unmap_list,
					  qdf_nbuf_t *nbuf_free_list)
{
	uint32_t i, num_desc, page_id, offset, num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;

	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages))) {
		qdf_err("No pages found on this desc pool");
		return QDF_STATUS_E_INVAL;
	}
	num_desc = rx_desc_pool->pool_size;
	num_desc_per_page = rx_desc_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_desc_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		dp_rx_desc_free_dbg_info(rx_desc);
		if (rx_desc->in_use) {
			if (!rx_desc->unmapped) {
				DP_RX_HEAD_APPEND(*nbuf_unmap_list,
						  rx_desc->nbuf);
				rx_desc->unmapped = 1;
			} else {
				DP_RX_HEAD_APPEND(*nbuf_free_list,
						  rx_desc->nbuf);
			}
		}
	}
	return QDF_STATUS_SUCCESS;
}

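/*
 * dp_rx_desc_nbuf_collect() sorts the nbufs of in-use descriptors into two
 * lists: buffers still holding a DMA mapping go on nbuf_unmap_list,
 * already-unmapped ones go straight on nbuf_free_list. The cleanup below
 * then unmaps and/or frees them, and is run by its callers outside the
 * pool lock.
 */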
static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
				    qdf_nbuf_t nbuf_unmap_list,
				    qdf_nbuf_t nbuf_free_list,
				    uint16_t buf_size,
				    bool is_mon_pool)
{
	qdf_nbuf_t nbuf = nbuf_unmap_list;
	qdf_nbuf_t next;

	while (nbuf) {
		next = nbuf->next;

		if (!is_mon_pool)
			dp_audio_smmu_unmap(soc->osdev,
					    QDF_NBUF_CB_PADDR(nbuf),
					    buf_size);

		if (dp_ipa_handle_rx_buf_smmu_mapping(
						soc, nbuf, buf_size,
						false, __func__,
						__LINE__))
			dp_info_rl("Unable to unmap nbuf: %pK", nbuf);

		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL, buf_size);
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}

	nbuf = nbuf_free_list;
	while (nbuf) {
		next = nbuf->next;
		dp_rx_nbuf_free(nbuf);
		nbuf = next;
	}
}

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, false);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf_unmap_list = NULL;
	qdf_nbuf_t nbuf_free_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_collect(soc, rx_desc_pool,
				&nbuf_unmap_list, &nbuf_free_list);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	dp_rx_desc_nbuf_cleanup(soc, nbuf_unmap_list, nbuf_free_list,
				rx_desc_pool->buf_size, is_mon_pool);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

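/*
 * Note: dp_rx_desc_pool_free() releases only the descriptor page memory;
 * since the descriptors tracking attached nbufs live in those pages, any
 * remaining buffers should be returned through one of the nbuf-free paths
 * above before the pages are freed.
 */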
void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;

	dp_desc_multi_pages_mem_free(soc, rx_desc_pool->desc_type,
				     &rx_desc_pool->desc_pages, 0, true);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	qdf_spin_lock_bh(&rx_desc_pool->lock);

	rx_desc_pool->freelist = NULL;
	rx_desc_pool->pool_size = 0;

	/* Deinitialize rx mon dest frag flag */
	rx_desc_pool->rx_mon_dest_frag_enable = false;
	qdf_frag_cache_drain(&rx_desc_pool->pf_cache);

	soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool, pool_id);

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_deinit);
#else
QDF_STATUS dp_rx_desc_pool_is_allocated(struct rx_desc_pool *rx_desc_pool)
{
	if (!rx_desc_pool->array) {
		dp_err("nss-wifi<4> skip Rx refill");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_is_allocated);

QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc,
				 uint32_t pool_size,
				 struct rx_desc_pool *rx_desc_pool)
{
	rx_desc_pool->array = qdf_mem_common_alloc(pool_size *
				     sizeof(union dp_rx_desc_list_elem_t));

	if (!(rx_desc_pool->array)) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "RX Desc Pool allocation failed");
		return QDF_STATUS_E_NOMEM;
	}
	return QDF_STATUS_SUCCESS;
}

qdf_export_symbol(dp_rx_desc_pool_alloc);

QDF_STATUS dp_rx_desc_pool_init_generic(struct dp_soc *soc,
				  struct rx_desc_pool *rx_desc_pool,
				  uint32_t pool_id)
{
	int i;

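	/*
	 * Single-array cookie layout (implied by the shift below): the low
	 * 18 bits carry the element index and the pool ID sits above them,
	 * e.g. pool_id 1, index 5 -> cookie (1 << 18) | 5 = 0x40005.
	 */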
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (i == rx_desc_pool->pool_size - 1)
			rx_desc_pool->array[i].next = NULL;
		else
			rx_desc_pool->array[i].next =
				&rx_desc_pool->array[i + 1];
		rx_desc_pool->array[i].rx_desc.cookie = i | (pool_id << 18);
		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
		rx_desc_pool->array[i].rx_desc.in_use = 0;
	}
	return QDF_STATUS_SUCCESS;
}

void dp_rx_desc_pool_init(struct dp_soc *soc, uint32_t pool_id,
			  uint32_t pool_size, struct rx_desc_pool *rx_desc_pool)
{
	QDF_STATUS status;

	/* Initialize the lock */
	qdf_spinlock_create(&rx_desc_pool->lock);

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	rx_desc_pool->pool_size = pool_size;

	/* link SW rx descs into a freelist */
	rx_desc_pool->freelist = &rx_desc_pool->array[0];
	/* qdf_mem_zero() takes a byte count, so zero the whole element
	 * array, not just pool_size bytes of it
	 */
	qdf_mem_zero(rx_desc_pool->freelist,
		     rx_desc_pool->pool_size *
		     sizeof(union dp_rx_desc_list_elem_t));

	status = soc->arch_ops.dp_rx_desc_pool_init(soc, rx_desc_pool,
						    pool_id);
	if (!QDF_IS_STATUS_SUCCESS(status))
		dp_err("RX desc pool initialization failed");

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_pool_init);

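/*
 * With WLAN_SUPPORT_PPEDS a descriptor can carry a reuse buffer
 * (reuse_nbuf) instead of the regular nbuf; the helper below returns
 * whichever one the descriptor actually holds so the teardown paths free
 * the right buffer.
 */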
#ifdef WLAN_SUPPORT_PPEDS
static inline
qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
{
	if (rx_desc_pool->array[i].rx_desc.has_reuse_nbuf)
		return rx_desc_pool->array[i].rx_desc.reuse_nbuf;
	else
		return rx_desc_pool->array[i].rx_desc.nbuf;
}
#else
static inline
qdf_nbuf_t dp_rx_desc_get_nbuf(struct rx_desc_pool *rx_desc_pool, int i)
{
	return rx_desc_pool->array[i].rx_desc.nbuf;
}
#endif

void dp_rx_desc_nbuf_and_pool_free(struct dp_soc *soc, uint32_t pool_id,
				   struct rx_desc_pool *rx_desc_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, i);

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_mem_common_free(rx_desc_pool->array);
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	qdf_spinlock_destroy(&rx_desc_pool->lock);
}

void dp_rx_desc_nbuf_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool,
			  bool is_mon_pool)
{
	qdf_nbuf_t nbuf;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			nbuf = dp_rx_desc_get_nbuf(rx_desc_pool, i);

			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				dp_rx_nbuf_unmap_pool(soc, rx_desc_pool, nbuf);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			dp_rx_nbuf_free(nbuf);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_nbuf_free);

#ifdef DP_RX_MON_MEM_FRAG
void dp_rx_desc_frag_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_dma_addr_t paddr;
	qdf_frag_t vaddr;
	int i;

	qdf_spin_lock_bh(&rx_desc_pool->lock);
	for (i = 0; i < rx_desc_pool->pool_size; i++) {
		if (rx_desc_pool->array[i].rx_desc.in_use) {
			paddr = rx_desc_pool->array[i].rx_desc.paddr_buf_start;
			vaddr = rx_desc_pool->array[i].rx_desc.rx_buf_start;

			dp_rx_desc_free_dbg_info(&rx_desc_pool->array[i].rx_desc);
			if (!(rx_desc_pool->array[i].rx_desc.unmapped)) {
				qdf_mem_unmap_page(soc->osdev, paddr,
						   rx_desc_pool->buf_size,
						   QDF_DMA_FROM_DEVICE);
				rx_desc_pool->array[i].rx_desc.unmapped = 1;
			}
			qdf_frag_free(vaddr);
		}
	}
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_desc_frag_free);
#endif

void dp_rx_desc_pool_free(struct dp_soc *soc,
			  struct rx_desc_pool *rx_desc_pool)
{
	qdf_mem_common_free(rx_desc_pool->array);
}

qdf_export_symbol(dp_rx_desc_pool_free);

void dp_rx_desc_pool_deinit(struct dp_soc *soc,
			    struct rx_desc_pool *rx_desc_pool,
			    uint32_t pool_id)
{
	if (rx_desc_pool->pool_size) {
		qdf_spin_lock_bh(&rx_desc_pool->lock);

		rx_desc_pool->freelist = NULL;
		rx_desc_pool->pool_size = 0;

		/* Deinitialize rx mon dest frag flag */
		rx_desc_pool->rx_mon_dest_frag_enable = false;
		qdf_frag_cache_drain(&rx_desc_pool->pf_cache);

		soc->arch_ops.dp_rx_desc_pool_deinit(soc, rx_desc_pool,
						     pool_id);

		qdf_spin_unlock_bh(&rx_desc_pool->lock);
		qdf_spinlock_destroy(&rx_desc_pool->lock);
	}
}

qdf_export_symbol(dp_rx_desc_pool_deinit);

#endif /* RX_DESC_MULTI_PAGE_ALLOC */

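/*
 * Generic no-op: architectures with no extra per-pool state plug this into
 * soc->arch_ops.dp_rx_desc_pool_deinit, so the common deinit path can call
 * through unconditionally.
 */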
void dp_rx_desc_pool_deinit_generic(struct dp_soc *soc,
			       struct rx_desc_pool *rx_desc_pool,
			       uint32_t pool_id)
{
}

uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
				struct rx_desc_pool *rx_desc_pool,
				uint16_t num_descs,
				union dp_rx_desc_list_elem_t **desc_list,
				union dp_rx_desc_list_elem_t **tail)
{
	uint16_t count;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	*desc_list = *tail = rx_desc_pool->freelist;

	for (count = 0; count < num_descs; count++) {
		if (qdf_unlikely(!rx_desc_pool->freelist)) {
			qdf_spin_unlock_bh(&rx_desc_pool->lock);
			return count;
		}
		*tail = rx_desc_pool->freelist;
		rx_desc_pool->freelist = rx_desc_pool->freelist->next;
	}
	(*tail)->next = NULL;
	qdf_spin_unlock_bh(&rx_desc_pool->lock);
	return count;
}

qdf_export_symbol(dp_rx_get_free_desc_list);

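/*
 * Usage sketch (illustrative, with hypothetical locals): reserve a batch
 * of descriptors for replenish, then hand any unused ones back:
 *
 *	union dp_rx_desc_list_elem_t *desc_list = NULL;
 *	union dp_rx_desc_list_elem_t *tail = NULL;
 *	uint16_t got;
 *
 *	got = dp_rx_get_free_desc_list(soc, pool_id, rx_desc_pool,
 *				       num_req, &desc_list, &tail);
 *	... attach buffers, consuming 'got' entries from desc_list ...
 *	if (desc_list)
 *		dp_rx_add_desc_list_to_free_list(soc, &desc_list, &tail,
 *						 pool_id, rx_desc_pool);
 */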
void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
				union dp_rx_desc_list_elem_t **local_desc_list,
				union dp_rx_desc_list_elem_t **tail,
				uint16_t pool_id,
				struct rx_desc_pool *rx_desc_pool)
{
	union dp_rx_desc_list_elem_t *temp_list = NULL;

	qdf_spin_lock_bh(&rx_desc_pool->lock);

	temp_list = rx_desc_pool->freelist;
	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
		  "temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
		  temp_list, *local_desc_list, *tail, (*tail)->next);
	rx_desc_pool->freelist = *local_desc_list;
	(*tail)->next = temp_list;
	*tail = NULL;
	*local_desc_list = NULL;

	qdf_spin_unlock_bh(&rx_desc_pool->lock);
}

qdf_export_symbol(dp_rx_add_desc_list_to_free_list);
539