xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/dp_rx_buffer_pool.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #include "dp_rx_buffer_pool.h"
21*5113495bSYour Name #include "dp_ipa.h"
22*5113495bSYour Name 
23*5113495bSYour Name #ifndef DP_RX_BUFFER_POOL_SIZE
24*5113495bSYour Name #define DP_RX_BUFFER_POOL_SIZE 128
25*5113495bSYour Name #endif
26*5113495bSYour Name 
27*5113495bSYour Name #ifndef DP_RX_BUFF_POOL_ALLOC_THRES
28*5113495bSYour Name #define DP_RX_BUFF_POOL_ALLOC_THRES 1
29*5113495bSYour Name #endif
30*5113495bSYour Name 
31*5113495bSYour Name #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
dp_rx_buffer_pool_refill(struct dp_soc * soc,qdf_nbuf_t nbuf,u8 mac_id)32*5113495bSYour Name bool dp_rx_buffer_pool_refill(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
33*5113495bSYour Name {
34*5113495bSYour Name 	struct dp_pdev *pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
35*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
36*5113495bSYour Name 	struct rx_buff_pool *bufpool = &soc->rx_buff_pool[mac_id];
37*5113495bSYour Name 	qdf_nbuf_t next_nbuf, first_nbuf, refill_nbuf;
38*5113495bSYour Name 	bool consumed = false;
39*5113495bSYour Name 
40*5113495bSYour Name 	if (!bufpool->is_initialized || !pdev)
41*5113495bSYour Name 		return consumed;
42*5113495bSYour Name 
43*5113495bSYour Name 	/* process only buffers of RXDMA ring */
44*5113495bSYour Name 	if (soc->wlan_cfg_ctx->rxdma1_enable)
45*5113495bSYour Name 		return consumed;
46*5113495bSYour Name 
47*5113495bSYour Name 	first_nbuf = nbuf;
48*5113495bSYour Name 
49*5113495bSYour Name 	while (nbuf) {
50*5113495bSYour Name 		next_nbuf = qdf_nbuf_next(nbuf);
51*5113495bSYour Name 
52*5113495bSYour Name 		if (qdf_likely(qdf_nbuf_queue_head_qlen(&bufpool->emerg_nbuf_q) >=
53*5113495bSYour Name 		    DP_RX_BUFFER_POOL_SIZE))
54*5113495bSYour Name 			break;
55*5113495bSYour Name 
56*5113495bSYour Name 		refill_nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
57*5113495bSYour Name 					     RX_BUFFER_RESERVATION,
58*5113495bSYour Name 					     rx_desc_pool->buf_alignment,
59*5113495bSYour Name 					     FALSE);
60*5113495bSYour Name 
61*5113495bSYour Name 		/* Failed to allocate new nbuf, reset and place it back
62*5113495bSYour Name 		 * in to the pool.
63*5113495bSYour Name 		 */
64*5113495bSYour Name 		if (!refill_nbuf) {
65*5113495bSYour Name 			DP_STATS_INC(pdev,
66*5113495bSYour Name 				     rx_buffer_pool.num_bufs_consumed, 1);
67*5113495bSYour Name 			consumed = true;
68*5113495bSYour Name 			break;
69*5113495bSYour Name 		}
70*5113495bSYour Name 
71*5113495bSYour Name 		/* Successful allocation!! */
72*5113495bSYour Name 		DP_STATS_INC(pdev,
73*5113495bSYour Name 			     rx_buffer_pool.num_bufs_alloc_success, 1);
74*5113495bSYour Name 		qdf_nbuf_queue_head_enqueue_tail(&bufpool->emerg_nbuf_q,
75*5113495bSYour Name 						 refill_nbuf);
76*5113495bSYour Name 		nbuf = next_nbuf;
77*5113495bSYour Name 	}
78*5113495bSYour Name 
79*5113495bSYour Name 	nbuf = first_nbuf;
80*5113495bSYour Name 	if (consumed) {
81*5113495bSYour Name 		/* Free the MSDU/scattered MSDU */
82*5113495bSYour Name 		while (nbuf) {
83*5113495bSYour Name 			next_nbuf = qdf_nbuf_next(nbuf);
84*5113495bSYour Name 			dp_rx_buffer_pool_nbuf_free(soc, nbuf, mac_id);
85*5113495bSYour Name 			nbuf = next_nbuf;
86*5113495bSYour Name 		}
87*5113495bSYour Name 	}
88*5113495bSYour Name 
89*5113495bSYour Name 	return consumed;
90*5113495bSYour Name }
91*5113495bSYour Name 
/**
 * dp_rx_buffer_pool_nbuf_free() - Recycle an nbuf into the emergency RX
 *				   buffer pool, or free it when the pool is
 *				   full or not initialized.
 * @soc: DP SOC handle
 * @nbuf: network buffer to recycle or free
 * @mac_id: LMAC id; remapped to the pdev's lmac when per-pdev lmac rings
 *	    are not in use
 */
void dp_rx_buffer_pool_nbuf_free(struct dp_soc *soc, qdf_nbuf_t nbuf, u8 mac_id)
{
	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
	struct rx_desc_pool *rx_desc_pool;
	struct rx_buff_pool *buff_pool;

	/* Guard against an unresolved pdev (dp_rx_buffer_pool_refill()
	 * performs the same NULL check); without it the lmac id cannot be
	 * trusted, so just release the buffer to the OS.
	 */
	if (qdf_unlikely(!dp_pdev)) {
		qdf_nbuf_free(nbuf);
		return;
	}

	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
		mac_id = dp_pdev->lmac_id;

	rx_desc_pool = &soc->rx_desc_buf[mac_id];
	buff_pool = &soc->rx_buff_pool[mac_id];

	/* Pool full or unused: free outright. Note the original
	 * "return qdf_nbuf_free(nbuf);" returned a void expression from a
	 * void function, which is a GNU extension and not standard C.
	 */
	if (qdf_likely(qdf_nbuf_queue_head_qlen(&buff_pool->emerg_nbuf_q) >=
		       DP_RX_BUFFER_POOL_SIZE) ||
	    !buff_pool->is_initialized) {
		qdf_nbuf_free(nbuf);
		return;
	}

	/* Reset headroom/alignment so the buffer can be reposted to HW */
	qdf_nbuf_reset(nbuf, RX_BUFFER_RESERVATION,
		       rx_desc_pool->buf_alignment);
	qdf_nbuf_queue_head_enqueue_tail(&buff_pool->emerg_nbuf_q, nbuf);
}
113*5113495bSYour Name 
dp_rx_refill_buff_pool_enqueue(struct dp_soc * soc)114*5113495bSYour Name void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc)
115*5113495bSYour Name {
116*5113495bSYour Name 	struct rx_desc_pool *rx_desc_pool;
117*5113495bSYour Name 	struct rx_refill_buff_pool *buff_pool;
118*5113495bSYour Name 	qdf_device_t dev;
119*5113495bSYour Name 	qdf_nbuf_t nbuf;
120*5113495bSYour Name 	QDF_STATUS ret;
121*5113495bSYour Name 	int count, i;
122*5113495bSYour Name 	uint16_t num_refill;
123*5113495bSYour Name 	uint16_t total_num_refill;
124*5113495bSYour Name 	uint16_t total_count = 0;
125*5113495bSYour Name 	uint16_t head, tail;
126*5113495bSYour Name 
127*5113495bSYour Name 	if (!soc)
128*5113495bSYour Name 		return;
129*5113495bSYour Name 
130*5113495bSYour Name 	dev = soc->osdev;
131*5113495bSYour Name 	buff_pool = &soc->rx_refill_buff_pool;
132*5113495bSYour Name 	rx_desc_pool = &soc->rx_desc_buf[0];
133*5113495bSYour Name 	if (!buff_pool->is_initialized)
134*5113495bSYour Name 		return;
135*5113495bSYour Name 
136*5113495bSYour Name 	head = buff_pool->head;
137*5113495bSYour Name 	tail = buff_pool->tail;
138*5113495bSYour Name 	if (tail > head)
139*5113495bSYour Name 		total_num_refill = (tail - head - 1);
140*5113495bSYour Name 	else
141*5113495bSYour Name 		total_num_refill = (buff_pool->max_bufq_len - head +
142*5113495bSYour Name 				    tail - 1);
143*5113495bSYour Name 
144*5113495bSYour Name 	while (total_num_refill) {
145*5113495bSYour Name 		if (total_num_refill > DP_RX_REFILL_BUFF_POOL_BURST)
146*5113495bSYour Name 			num_refill = DP_RX_REFILL_BUFF_POOL_BURST;
147*5113495bSYour Name 		else
148*5113495bSYour Name 			num_refill = total_num_refill;
149*5113495bSYour Name 
150*5113495bSYour Name 		count = 0;
151*5113495bSYour Name 		for (i = 0; i < num_refill; i++) {
152*5113495bSYour Name 			nbuf = qdf_nbuf_alloc(dev, rx_desc_pool->buf_size,
153*5113495bSYour Name 					      RX_BUFFER_RESERVATION,
154*5113495bSYour Name 					      rx_desc_pool->buf_alignment,
155*5113495bSYour Name 					      FALSE);
156*5113495bSYour Name 			if (qdf_unlikely(!nbuf))
157*5113495bSYour Name 				continue;
158*5113495bSYour Name 
159*5113495bSYour Name 			ret = qdf_nbuf_map_nbytes_single(dev, nbuf,
160*5113495bSYour Name 							 QDF_DMA_FROM_DEVICE,
161*5113495bSYour Name 							 rx_desc_pool->buf_size);
162*5113495bSYour Name 			if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
163*5113495bSYour Name 				qdf_nbuf_free(nbuf);
164*5113495bSYour Name 				continue;
165*5113495bSYour Name 			}
166*5113495bSYour Name 
167*5113495bSYour Name 			dp_audio_smmu_map(dev,
168*5113495bSYour Name 					  qdf_mem_paddr_from_dmaaddr(dev,
169*5113495bSYour Name 								     QDF_NBUF_CB_PADDR(nbuf)),
170*5113495bSYour Name 					  QDF_NBUF_CB_PADDR(nbuf),
171*5113495bSYour Name 					  rx_desc_pool->buf_size);
172*5113495bSYour Name 
173*5113495bSYour Name 			buff_pool->buf_elem[head++] = nbuf;
174*5113495bSYour Name 			head &= (buff_pool->max_bufq_len - 1);
175*5113495bSYour Name 			count++;
176*5113495bSYour Name 		}
177*5113495bSYour Name 
178*5113495bSYour Name 		if (count) {
179*5113495bSYour Name 			buff_pool->head = head;
180*5113495bSYour Name 			total_num_refill -= count;
181*5113495bSYour Name 			total_count += count;
182*5113495bSYour Name 		}
183*5113495bSYour Name 	}
184*5113495bSYour Name 
185*5113495bSYour Name 	DP_STATS_INC(buff_pool->dp_pdev,
186*5113495bSYour Name 		     rx_refill_buff_pool.num_bufs_refilled,
187*5113495bSYour Name 		     total_count);
188*5113495bSYour Name }
189*5113495bSYour Name 
dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc * soc)190*5113495bSYour Name static inline qdf_nbuf_t dp_rx_refill_buff_pool_dequeue_nbuf(struct dp_soc *soc)
191*5113495bSYour Name {
192*5113495bSYour Name 	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
193*5113495bSYour Name 	qdf_nbuf_t nbuf = NULL;
194*5113495bSYour Name 	uint16_t head, tail;
195*5113495bSYour Name 
196*5113495bSYour Name 	head = buff_pool->head;
197*5113495bSYour Name 	tail = buff_pool->tail;
198*5113495bSYour Name 
199*5113495bSYour Name 	if (head == tail)
200*5113495bSYour Name 		return NULL;
201*5113495bSYour Name 
202*5113495bSYour Name 	nbuf = buff_pool->buf_elem[tail++];
203*5113495bSYour Name 	tail &= (buff_pool->max_bufq_len - 1);
204*5113495bSYour Name 	buff_pool->tail = tail;
205*5113495bSYour Name 
206*5113495bSYour Name 	return nbuf;
207*5113495bSYour Name }
208*5113495bSYour Name 
209*5113495bSYour Name qdf_nbuf_t
dp_rx_buffer_pool_nbuf_alloc(struct dp_soc * soc,uint32_t mac_id,struct rx_desc_pool * rx_desc_pool,uint32_t num_available_buffers)210*5113495bSYour Name dp_rx_buffer_pool_nbuf_alloc(struct dp_soc *soc, uint32_t mac_id,
211*5113495bSYour Name 			     struct rx_desc_pool *rx_desc_pool,
212*5113495bSYour Name 			     uint32_t num_available_buffers)
213*5113495bSYour Name {
214*5113495bSYour Name 	struct dp_pdev *dp_pdev = dp_get_pdev_for_lmac_id(soc, mac_id);
215*5113495bSYour Name 	struct rx_buff_pool *buff_pool;
216*5113495bSYour Name 	struct dp_srng *dp_rxdma_srng;
217*5113495bSYour Name 	qdf_nbuf_t nbuf;
218*5113495bSYour Name 
219*5113495bSYour Name 	nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc);
220*5113495bSYour Name 	if (qdf_likely(nbuf)) {
221*5113495bSYour Name 		DP_STATS_INC(dp_pdev,
222*5113495bSYour Name 			     rx_refill_buff_pool.num_bufs_allocated, 1);
223*5113495bSYour Name 		return nbuf;
224*5113495bSYour Name 	}
225*5113495bSYour Name 
226*5113495bSYour Name 	if (!wlan_cfg_per_pdev_lmac_ring(soc->wlan_cfg_ctx))
227*5113495bSYour Name 		mac_id = dp_pdev->lmac_id;
228*5113495bSYour Name 
229*5113495bSYour Name 	buff_pool = &soc->rx_buff_pool[mac_id];
230*5113495bSYour Name 	dp_rxdma_srng = &soc->rx_refill_buf_ring[mac_id];
231*5113495bSYour Name 
232*5113495bSYour Name 	nbuf = qdf_nbuf_alloc(soc->osdev, rx_desc_pool->buf_size,
233*5113495bSYour Name 			      RX_BUFFER_RESERVATION,
234*5113495bSYour Name 			      rx_desc_pool->buf_alignment,
235*5113495bSYour Name 			      FALSE);
236*5113495bSYour Name 
237*5113495bSYour Name 	if (!buff_pool->is_initialized)
238*5113495bSYour Name 		return nbuf;
239*5113495bSYour Name 
240*5113495bSYour Name 	if (qdf_likely(nbuf)) {
241*5113495bSYour Name 		buff_pool->nbuf_fail_cnt = 0;
242*5113495bSYour Name 		return nbuf;
243*5113495bSYour Name 	}
244*5113495bSYour Name 
245*5113495bSYour Name 	buff_pool->nbuf_fail_cnt++;
246*5113495bSYour Name 
247*5113495bSYour Name 	/* Allocate buffer from the buffer pool */
248*5113495bSYour Name 	if (buff_pool->nbuf_fail_cnt >= DP_RX_BUFFER_POOL_ALLOC_THRES ||
249*5113495bSYour Name 	    (num_available_buffers < dp_rxdma_srng->num_entries / 10)) {
250*5113495bSYour Name 		nbuf = qdf_nbuf_queue_head_dequeue(&buff_pool->emerg_nbuf_q);
251*5113495bSYour Name 		if (nbuf)
252*5113495bSYour Name 			DP_STATS_INC(dp_pdev,
253*5113495bSYour Name 				     rx_buffer_pool.num_pool_bufs_replenish, 1);
254*5113495bSYour Name 	}
255*5113495bSYour Name 
256*5113495bSYour Name 	return nbuf;
257*5113495bSYour Name }
258*5113495bSYour Name 
259*5113495bSYour Name QDF_STATUS
dp_rx_buffer_pool_nbuf_map(struct dp_soc * soc,struct rx_desc_pool * rx_desc_pool,struct dp_rx_nbuf_frag_info * nbuf_frag_info_t)260*5113495bSYour Name dp_rx_buffer_pool_nbuf_map(struct dp_soc *soc,
261*5113495bSYour Name 			   struct rx_desc_pool *rx_desc_pool,
262*5113495bSYour Name 			   struct dp_rx_nbuf_frag_info *nbuf_frag_info_t)
263*5113495bSYour Name {
264*5113495bSYour Name 	QDF_STATUS ret = QDF_STATUS_SUCCESS;
265*5113495bSYour Name 
266*5113495bSYour Name 	if (!QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)) {
267*5113495bSYour Name 		ret = qdf_nbuf_map_nbytes_single(soc->osdev,
268*5113495bSYour Name 						 (nbuf_frag_info_t->virt_addr).nbuf,
269*5113495bSYour Name 						 QDF_DMA_FROM_DEVICE,
270*5113495bSYour Name 						 rx_desc_pool->buf_size);
271*5113495bSYour Name 		if (QDF_IS_STATUS_SUCCESS(ret))
272*5113495bSYour Name 			dp_audio_smmu_map(soc->osdev,
273*5113495bSYour Name 					  qdf_mem_paddr_from_dmaaddr(soc->osdev,
274*5113495bSYour Name 								     QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf)),
275*5113495bSYour Name 					  QDF_NBUF_CB_PADDR((nbuf_frag_info_t->virt_addr).nbuf),
276*5113495bSYour Name 					  rx_desc_pool->buf_size);
277*5113495bSYour Name 	}
278*5113495bSYour Name 
279*5113495bSYour Name 
280*5113495bSYour Name 	return ret;
281*5113495bSYour Name }
282*5113495bSYour Name 
/**
 * dp_rx_refill_buff_pool_init() - Create and pre-fill the soc-wide RX
 *				   refill buffer pool.
 * @soc: DP SOC handle
 * @mac_id: LMAC id used only to pick the desc pool for size/alignment
 *
 * Allocates the element array, then best-effort allocates and DMA-maps up
 * to max_bufq_len - 1 nbufs (one ring slot is kept empty so head == tail
 * means "empty"). NOTE(review): head/tail wrap-around elsewhere uses
 * "& (max_bufq_len - 1)", which assumes the configured pool size is a
 * power of two - confirm against wlan_cfg.
 */
static void dp_rx_refill_buff_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	QDF_STATUS status;
	uint16_t filled = 0;
	int i;

	if (!wlan_cfg_is_rx_refill_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_err("RX refill buffer pool support is disabled");
		pool->is_initialized = false;
		return;
	}

	pool->max_bufq_len =
		wlan_cfg_get_rx_refill_buf_pool_size(soc->wlan_cfg_ctx);

	pool->buf_elem = qdf_mem_malloc(pool->max_bufq_len *
					sizeof(qdf_nbuf_t));
	if (!pool->buf_elem) {
		dp_err("Failed to allocate memory for RX refill buf element");
		pool->is_initialized = false;
		return;
	}

	pool->dp_pdev = dp_get_pdev_for_lmac_id(soc, 0);
	pool->tail = 0;

	/* Best effort pre-fill; alloc/map failures simply leave the ring
	 * partially filled.
	 */
	for (i = 0; i < (pool->max_bufq_len - 1); i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      desc_pool->buf_alignment, FALSE);
		if (!nbuf)
			continue;

		status = qdf_nbuf_map_nbytes_single(soc->osdev, nbuf,
						    QDF_DMA_FROM_DEVICE,
						    desc_pool->buf_size);
		if (qdf_unlikely(QDF_IS_STATUS_ERROR(status))) {
			qdf_nbuf_free(nbuf);
			continue;
		}

		dp_audio_smmu_map(soc->osdev,
				  qdf_mem_paddr_from_dmaaddr(soc->osdev,
							     QDF_NBUF_CB_PADDR(nbuf)),
				  QDF_NBUF_CB_PADDR(nbuf),
				  desc_pool->buf_size);

		pool->buf_elem[filled] = nbuf;
		filled++;
	}

	pool->head = filled;

	dp_info("RX refill buffer pool required allocation: %u actual allocation: %u",
		pool->max_bufq_len,
		pool->head);

	pool->is_initialized = true;
}
345*5113495bSYour Name 
/**
 * dp_rx_buffer_pool_init() - Initialize the per-mac emergency RX buffer
 *			      pool (and, via helper, the soc-wide refill
 *			      pool).
 * @soc: DP SOC handle
 * @mac_id: LMAC id selecting the pool and its desc pool
 */
void dp_rx_buffer_pool_init(struct dp_soc *soc, u8 mac_id)
{
	struct rx_desc_pool *desc_pool = &soc->rx_desc_buf[mac_id];
	struct rx_buff_pool *pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;
	int i;

	/* The refill pool is soc-wide and is brought up alongside this one */
	dp_rx_refill_buff_pool_init(soc, mac_id);

	if (!wlan_cfg_is_rx_buffer_pool_enabled(soc->wlan_cfg_ctx)) {
		dp_info("RX buffer pool support is disabled");
		pool->is_initialized = false;
		return;
	}

	if (pool->is_initialized)
		return;

	qdf_nbuf_queue_head_init(&pool->emerg_nbuf_q);

	/* Best effort: allocation failures just shrink the emergency pool */
	for (i = 0; i < DP_RX_BUFFER_POOL_SIZE; i++) {
		nbuf = qdf_nbuf_alloc(soc->osdev, desc_pool->buf_size,
				      RX_BUFFER_RESERVATION,
				      desc_pool->buf_alignment, FALSE);
		if (nbuf)
			qdf_nbuf_queue_head_enqueue_tail(&pool->emerg_nbuf_q,
							 nbuf);
	}

	dp_info("RX buffer pool required allocation: %u actual allocation: %u",
		DP_RX_BUFFER_POOL_SIZE,
		qdf_nbuf_queue_head_qlen(&pool->emerg_nbuf_q));

	pool->is_initialized = true;
}
382*5113495bSYour Name 
/**
 * dp_rx_refill_buff_pool_deinit() - Drain the RX refill buffer pool,
 *				     unmapping and freeing every queued nbuf.
 * @soc: DP SOC handle
 * @mac_id: LMAC id used only to pick the desc pool for the buffer size
 */
static void dp_rx_refill_buff_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_refill_buff_pool *buff_pool = &soc->rx_refill_buff_pool;
	struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
	qdf_nbuf_t nbuf;
	uint32_t count = 0;

	if (!buff_pool->is_initialized)
		return;

	while ((nbuf = dp_rx_refill_buff_pool_dequeue_nbuf(soc))) {
		dp_audio_smmu_unmap(soc->osdev,
				    QDF_NBUF_CB_PADDR(nbuf),
				    rx_desc_pool->buf_size);
		/* BUGFIX: unmap with the same direction these buffers were
		 * mapped with (QDF_DMA_FROM_DEVICE in enqueue/init paths);
		 * the previous QDF_DMA_BIDIRECTIONAL mismatched the mapping
		 * direction, which the DMA-API forbids.
		 */
		qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
					     QDF_DMA_FROM_DEVICE,
					     rx_desc_pool->buf_size);
		qdf_nbuf_free(nbuf);
		count++;
	}

	dp_info("Rx refill buffers freed during deinit %u head: %u, tail: %u",
		count, buff_pool->head, buff_pool->tail);

	qdf_mem_free(buff_pool->buf_elem);
	buff_pool->is_initialized = false;
}
410*5113495bSYour Name 
/**
 * dp_rx_buffer_pool_deinit() - Tear down the per-mac emergency RX buffer
 *				pool (and, via helper, the soc-wide refill
 *				pool).
 * @soc: DP SOC handle
 * @mac_id: LMAC id selecting the pool
 */
void dp_rx_buffer_pool_deinit(struct dp_soc *soc, u8 mac_id)
{
	struct rx_buff_pool *pool = &soc->rx_buff_pool[mac_id];
	qdf_nbuf_t nbuf;

	dp_rx_refill_buff_pool_deinit(soc, mac_id);

	if (!pool->is_initialized)
		return;

	dp_info("buffers in the RX buffer pool during deinit: %u",
		qdf_nbuf_queue_head_qlen(&pool->emerg_nbuf_q));

	/* Emergency-pool nbufs are unmapped, so a plain free suffices */
	while ((nbuf = qdf_nbuf_queue_head_dequeue(&pool->emerg_nbuf_q)))
		qdf_nbuf_free(nbuf);

	pool->is_initialized = false;
}
429*5113495bSYour Name #endif /* WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL */
430