1*5113495bSYour Name /*
2*5113495bSYour Name * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name *
5*5113495bSYour Name * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name * above copyright notice and this permission notice appear in all
8*5113495bSYour Name * copies.
9*5113495bSYour Name *
10*5113495bSYour Name * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name */
19*5113495bSYour Name
20*5113495bSYour Name #include "hal_hw_headers.h"
21*5113495bSYour Name #include "dp_types.h"
22*5113495bSYour Name #include "dp_tx_desc.h"
23*5113495bSYour Name
24*5113495bSYour Name #ifndef DESC_PARTITION
25*5113495bSYour Name #define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
26*5113495bSYour Name #define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) \
27*5113495bSYour Name do { \
28*5113495bSYour Name uint8_t sig_bit; \
29*5113495bSYour Name soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
30*5113495bSYour Name /* Calculate page divider to find page number */ \
31*5113495bSYour Name sig_bit = 0; \
32*5113495bSYour Name while (num_desc_per_page) { \
33*5113495bSYour Name sig_bit++; \
34*5113495bSYour Name num_desc_per_page = num_desc_per_page >> 1; \
35*5113495bSYour Name } \
36*5113495bSYour Name soc->tx_desc[pool_id].page_divider = (sig_bit - 1); \
37*5113495bSYour Name } while (0)
38*5113495bSYour Name #else
39*5113495bSYour Name #define DP_TX_DESC_SIZE(a) a
40*5113495bSYour Name #define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
41*5113495bSYour Name #endif /* DESC_PARTITION */
42*5113495bSYour Name
43*5113495bSYour Name /**
44*5113495bSYour Name * dp_tx_desc_pool_counter_initialize() - Initialize counters
45*5113495bSYour Name * @tx_desc_pool: Handle to DP tx_desc_pool structure
46*5113495bSYour Name * @num_elem: Number of descriptor elements per pool
47*5113495bSYour Name *
48*5113495bSYour Name * Return: None
49*5113495bSYour Name */
50*5113495bSYour Name #ifdef QCA_LL_TX_FLOW_CONTROL_V2
51*5113495bSYour Name static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s * tx_desc_pool,uint16_t num_elem)52*5113495bSYour Name dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
53*5113495bSYour Name uint16_t num_elem)
54*5113495bSYour Name {
55*5113495bSYour Name }
56*5113495bSYour Name #else
57*5113495bSYour Name static void
dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s * tx_desc_pool,uint16_t num_elem)58*5113495bSYour Name dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
59*5113495bSYour Name uint16_t num_elem)
60*5113495bSYour Name {
61*5113495bSYour Name tx_desc_pool->elem_count = num_elem;
62*5113495bSYour Name tx_desc_pool->num_free = num_elem;
63*5113495bSYour Name tx_desc_pool->num_allocated = 0;
64*5113495bSYour Name }
65*5113495bSYour Name #endif
66*5113495bSYour Name
67*5113495bSYour Name #ifdef DP_UMAC_HW_RESET_SUPPORT
68*5113495bSYour Name /**
69*5113495bSYour Name * dp_tx_desc_clean_up() - Clean up the tx descriptors
70*5113495bSYour Name * @ctxt: context passed
71*5113495bSYour Name * @elem: element to be cleaned up
72*5113495bSYour Name * @elem_list: element list
73*5113495bSYour Name *
74*5113495bSYour Name */
dp_tx_desc_clean_up(void * ctxt,void * elem,void * elem_list)75*5113495bSYour Name static void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
76*5113495bSYour Name {
77*5113495bSYour Name struct dp_soc *soc = (struct dp_soc *)ctxt;
78*5113495bSYour Name struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
79*5113495bSYour Name qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
80*5113495bSYour Name qdf_nbuf_t nbuf = NULL;
81*5113495bSYour Name
82*5113495bSYour Name if (tx_desc->nbuf) {
83*5113495bSYour Name nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
84*5113495bSYour Name dp_tx_desc_release(soc, tx_desc, tx_desc->pool_id);
85*5113495bSYour Name
86*5113495bSYour Name if (nbuf) {
87*5113495bSYour Name if (!nbuf_list) {
88*5113495bSYour Name dp_err("potential memory leak");
89*5113495bSYour Name qdf_assert_always(0);
90*5113495bSYour Name }
91*5113495bSYour Name
92*5113495bSYour Name nbuf->next = *nbuf_list;
93*5113495bSYour Name *nbuf_list = nbuf;
94*5113495bSYour Name }
95*5113495bSYour Name }
96*5113495bSYour Name }
97*5113495bSYour Name
dp_tx_desc_pool_cleanup(struct dp_soc * soc,qdf_nbuf_t * nbuf_list,bool cleanup)98*5113495bSYour Name void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list,
99*5113495bSYour Name bool cleanup)
100*5113495bSYour Name {
101*5113495bSYour Name int i;
102*5113495bSYour Name struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
103*5113495bSYour Name uint8_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
104*5113495bSYour Name
105*5113495bSYour Name if (!cleanup)
106*5113495bSYour Name return;
107*5113495bSYour Name
108*5113495bSYour Name for (i = 0; i < num_pool; i++) {
109*5113495bSYour Name tx_desc_pool = dp_get_tx_desc_pool(soc, i);
110*5113495bSYour Name
111*5113495bSYour Name TX_DESC_LOCK_LOCK(&tx_desc_pool->lock);
112*5113495bSYour Name if (tx_desc_pool)
113*5113495bSYour Name qdf_tx_desc_pool_free_bufs(soc,
114*5113495bSYour Name &tx_desc_pool->desc_pages,
115*5113495bSYour Name tx_desc_pool->elem_size,
116*5113495bSYour Name tx_desc_pool->elem_count,
117*5113495bSYour Name true, &dp_tx_desc_clean_up,
118*5113495bSYour Name nbuf_list);
119*5113495bSYour Name
120*5113495bSYour Name TX_DESC_LOCK_UNLOCK(&tx_desc_pool->lock);
121*5113495bSYour Name
122*5113495bSYour Name tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, i);
123*5113495bSYour Name TX_DESC_LOCK_LOCK(&tx_desc_pool->lock);
124*5113495bSYour Name
125*5113495bSYour Name if (tx_desc_pool)
126*5113495bSYour Name qdf_tx_desc_pool_free_bufs(soc,
127*5113495bSYour Name &tx_desc_pool->desc_pages,
128*5113495bSYour Name tx_desc_pool->elem_size,
129*5113495bSYour Name tx_desc_pool->elem_count,
130*5113495bSYour Name true, &dp_tx_desc_clean_up,
131*5113495bSYour Name nbuf_list);
132*5113495bSYour Name
133*5113495bSYour Name TX_DESC_LOCK_UNLOCK(&tx_desc_pool->lock);
134*5113495bSYour Name }
135*5113495bSYour Name }
136*5113495bSYour Name #endif
137*5113495bSYour Name
138*5113495bSYour Name #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
dp_tx_desc_pool_alloc_mem(struct dp_soc * soc,int8_t pool_id,bool spcl_tx_desc)139*5113495bSYour Name static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
140*5113495bSYour Name bool spcl_tx_desc)
141*5113495bSYour Name {
142*5113495bSYour Name struct dp_global_context *dp_global = NULL;
143*5113495bSYour Name
144*5113495bSYour Name dp_global = wlan_objmgr_get_global_ctx();
145*5113495bSYour Name
146*5113495bSYour Name if (spcl_tx_desc) {
147*5113495bSYour Name dp_global->spcl_tx_desc[soc->arch_id][pool_id] =
148*5113495bSYour Name qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
149*5113495bSYour Name } else {
150*5113495bSYour Name dp_global->tx_desc[soc->arch_id][pool_id] =
151*5113495bSYour Name qdf_mem_malloc(sizeof(struct dp_tx_desc_pool_s));
152*5113495bSYour Name }
153*5113495bSYour Name }
154*5113495bSYour Name
dp_tx_desc_pool_free_mem(struct dp_soc * soc,int8_t pool_id,bool spcl_tx_desc)155*5113495bSYour Name static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
156*5113495bSYour Name bool spcl_tx_desc)
157*5113495bSYour Name {
158*5113495bSYour Name struct dp_global_context *dp_global = NULL;
159*5113495bSYour Name
160*5113495bSYour Name dp_global = wlan_objmgr_get_global_ctx();
161*5113495bSYour Name if (spcl_tx_desc) {
162*5113495bSYour Name if (!dp_global->spcl_tx_desc[soc->arch_id][pool_id])
163*5113495bSYour Name return;
164*5113495bSYour Name
165*5113495bSYour Name qdf_mem_free(dp_global->spcl_tx_desc[soc->arch_id][pool_id]);
166*5113495bSYour Name dp_global->spcl_tx_desc[soc->arch_id][pool_id] = NULL;
167*5113495bSYour Name } else {
168*5113495bSYour Name if (!dp_global->tx_desc[soc->arch_id][pool_id])
169*5113495bSYour Name return;
170*5113495bSYour Name
171*5113495bSYour Name qdf_mem_free(dp_global->tx_desc[soc->arch_id][pool_id]);
172*5113495bSYour Name dp_global->tx_desc[soc->arch_id][pool_id] = NULL;
173*5113495bSYour Name }
174*5113495bSYour Name }
175*5113495bSYour Name #else
dp_tx_desc_pool_alloc_mem(struct dp_soc * soc,int8_t pool_id,bool spcl_tx_desc)176*5113495bSYour Name static void dp_tx_desc_pool_alloc_mem(struct dp_soc *soc, int8_t pool_id,
177*5113495bSYour Name bool spcl_tx_desc)
178*5113495bSYour Name {
179*5113495bSYour Name }
180*5113495bSYour Name
dp_tx_desc_pool_free_mem(struct dp_soc * soc,int8_t pool_id,bool spcl_tx_desc)181*5113495bSYour Name static void dp_tx_desc_pool_free_mem(struct dp_soc *soc, int8_t pool_id,
182*5113495bSYour Name bool spcl_tx_desc)
183*5113495bSYour Name {
184*5113495bSYour Name }
185*5113495bSYour Name #endif
186*5113495bSYour Name
dp_tx_desc_pool_alloc(struct dp_soc * soc,uint8_t pool_id,uint32_t num_elem,bool spcl_tx_desc)187*5113495bSYour Name QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
188*5113495bSYour Name uint32_t num_elem, bool spcl_tx_desc)
189*5113495bSYour Name {
190*5113495bSYour Name uint32_t desc_size, num_elem_t;
191*5113495bSYour Name struct dp_tx_desc_pool_s *tx_desc_pool;
192*5113495bSYour Name QDF_STATUS status;
193*5113495bSYour Name enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;
194*5113495bSYour Name
195*5113495bSYour Name desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
196*5113495bSYour Name
197*5113495bSYour Name dp_tx_desc_pool_alloc_mem(soc, pool_id, spcl_tx_desc);
198*5113495bSYour Name if (spcl_tx_desc) {
199*5113495bSYour Name tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
200*5113495bSYour Name desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
201*5113495bSYour Name num_elem_t = num_elem;
202*5113495bSYour Name } else {
203*5113495bSYour Name tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
204*5113495bSYour Name desc_type = QDF_DP_TX_DESC_TYPE;
205*5113495bSYour Name num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);
206*5113495bSYour Name }
207*5113495bSYour Name
208*5113495bSYour Name tx_desc_pool->desc_pages.page_size = DP_BLOCKMEM_SIZE;
209*5113495bSYour Name dp_desc_multi_pages_mem_alloc(soc, desc_type,
210*5113495bSYour Name &tx_desc_pool->desc_pages,
211*5113495bSYour Name desc_size, num_elem_t,
212*5113495bSYour Name 0, true);
213*5113495bSYour Name
214*5113495bSYour Name if (!tx_desc_pool->desc_pages.num_pages) {
215*5113495bSYour Name dp_err("Multi page alloc fail, tx desc");
216*5113495bSYour Name return QDF_STATUS_E_NOMEM;
217*5113495bSYour Name }
218*5113495bSYour Name
219*5113495bSYour Name /* Arch specific TX descriptor allocation */
220*5113495bSYour Name status = soc->arch_ops.dp_tx_desc_pool_alloc(soc, num_elem_t, pool_id);
221*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status)) {
222*5113495bSYour Name dp_err("failed to allocate arch specific descriptors");
223*5113495bSYour Name return QDF_STATUS_E_NOMEM;
224*5113495bSYour Name }
225*5113495bSYour Name
226*5113495bSYour Name return QDF_STATUS_SUCCESS;
227*5113495bSYour Name }
228*5113495bSYour Name
dp_tx_desc_pool_free(struct dp_soc * soc,uint8_t pool_id,bool spcl_tx_desc)229*5113495bSYour Name void dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id,
230*5113495bSYour Name bool spcl_tx_desc)
231*5113495bSYour Name {
232*5113495bSYour Name struct dp_tx_desc_pool_s *tx_desc_pool;
233*5113495bSYour Name enum qdf_dp_desc_type desc_type = QDF_DP_TX_DESC_TYPE;
234*5113495bSYour Name
235*5113495bSYour Name if (spcl_tx_desc) {
236*5113495bSYour Name tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
237*5113495bSYour Name desc_type = QDF_DP_TX_SPCL_DESC_TYPE;
238*5113495bSYour Name } else {
239*5113495bSYour Name tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
240*5113495bSYour Name desc_type = QDF_DP_TX_DESC_TYPE;
241*5113495bSYour Name }
242*5113495bSYour Name
243*5113495bSYour Name if (tx_desc_pool->desc_pages.num_pages)
244*5113495bSYour Name dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_DESC_TYPE,
245*5113495bSYour Name &tx_desc_pool->desc_pages, 0,
246*5113495bSYour Name true);
247*5113495bSYour Name
248*5113495bSYour Name /* Free arch specific TX descriptor */
249*5113495bSYour Name soc->arch_ops.dp_tx_desc_pool_free(soc, pool_id);
250*5113495bSYour Name dp_tx_desc_pool_free_mem(soc, pool_id, spcl_tx_desc);
251*5113495bSYour Name }
252*5113495bSYour Name
dp_tx_desc_pool_init(struct dp_soc * soc,uint8_t pool_id,uint32_t num_elem,bool spcl_tx_desc)253*5113495bSYour Name QDF_STATUS dp_tx_desc_pool_init(struct dp_soc *soc, uint8_t pool_id,
254*5113495bSYour Name uint32_t num_elem, bool spcl_tx_desc)
255*5113495bSYour Name {
256*5113495bSYour Name struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
257*5113495bSYour Name uint32_t desc_size, num_elem_t;
258*5113495bSYour Name
259*5113495bSYour Name desc_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
260*5113495bSYour Name
261*5113495bSYour Name if (spcl_tx_desc) {
262*5113495bSYour Name tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
263*5113495bSYour Name num_elem_t = num_elem;
264*5113495bSYour Name } else {
265*5113495bSYour Name tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
266*5113495bSYour Name num_elem_t = dp_get_updated_tx_desc(soc->ctrl_psoc, pool_id, num_elem);
267*5113495bSYour Name }
268*5113495bSYour Name if (qdf_mem_multi_page_link(soc->osdev,
269*5113495bSYour Name &tx_desc_pool->desc_pages,
270*5113495bSYour Name desc_size, num_elem_t, true)) {
271*5113495bSYour Name dp_err("invalid tx desc allocation -overflow num link");
272*5113495bSYour Name return QDF_STATUS_E_FAULT;
273*5113495bSYour Name }
274*5113495bSYour Name
275*5113495bSYour Name tx_desc_pool->freelist = (struct dp_tx_desc_s *)
276*5113495bSYour Name *tx_desc_pool->desc_pages.cacheable_pages;
277*5113495bSYour Name /* Set unique IDs for each Tx descriptor */
278*5113495bSYour Name if (QDF_STATUS_SUCCESS != soc->arch_ops.dp_tx_desc_pool_init(
279*5113495bSYour Name soc, num_elem_t,
280*5113495bSYour Name pool_id, spcl_tx_desc)) {
281*5113495bSYour Name dp_err("initialization per target failed");
282*5113495bSYour Name return QDF_STATUS_E_FAULT;
283*5113495bSYour Name }
284*5113495bSYour Name
285*5113495bSYour Name tx_desc_pool->elem_size = DP_TX_DESC_SIZE(sizeof(struct dp_tx_desc_s));
286*5113495bSYour Name
287*5113495bSYour Name dp_tx_desc_pool_counter_initialize(tx_desc_pool, num_elem_t);
288*5113495bSYour Name TX_DESC_LOCK_CREATE(&tx_desc_pool->lock);
289*5113495bSYour Name
290*5113495bSYour Name return QDF_STATUS_SUCCESS;
291*5113495bSYour Name }
292*5113495bSYour Name
dp_tx_desc_pool_deinit(struct dp_soc * soc,uint8_t pool_id,bool spcl_tx_desc)293*5113495bSYour Name void dp_tx_desc_pool_deinit(struct dp_soc *soc, uint8_t pool_id,
294*5113495bSYour Name bool spcl_tx_desc)
295*5113495bSYour Name {
296*5113495bSYour Name struct dp_tx_desc_pool_s *tx_desc_pool;
297*5113495bSYour Name
298*5113495bSYour Name if (spcl_tx_desc)
299*5113495bSYour Name tx_desc_pool = dp_get_spcl_tx_desc_pool(soc, pool_id);
300*5113495bSYour Name else
301*5113495bSYour Name tx_desc_pool = dp_get_tx_desc_pool(soc, pool_id);
302*5113495bSYour Name soc->arch_ops.dp_tx_desc_pool_deinit(soc, tx_desc_pool,
303*5113495bSYour Name pool_id, spcl_tx_desc);
304*5113495bSYour Name TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
305*5113495bSYour Name TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
306*5113495bSYour Name }
307*5113495bSYour Name
308*5113495bSYour Name QDF_STATUS
dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc * soc,uint32_t num_elem,uint8_t pool_id)309*5113495bSYour Name dp_tx_ext_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
310*5113495bSYour Name uint8_t pool_id)
311*5113495bSYour Name {
312*5113495bSYour Name QDF_STATUS status;
313*5113495bSYour Name qdf_dma_context_t memctx = 0;
314*5113495bSYour Name uint16_t elem_size = HAL_TX_EXT_DESC_WITH_META_DATA;
315*5113495bSYour Name struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
316*5113495bSYour Name uint16_t link_elem_size = sizeof(struct dp_tx_ext_desc_elem_s);
317*5113495bSYour Name
318*5113495bSYour Name dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
319*5113495bSYour Name memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
320*5113495bSYour Name
321*5113495bSYour Name /* Coherent tx extension descriptor alloc */
322*5113495bSYour Name dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_TYPE,
323*5113495bSYour Name &dp_tx_ext_desc_pool->desc_pages,
324*5113495bSYour Name elem_size, num_elem, memctx, false);
325*5113495bSYour Name
326*5113495bSYour Name if (!dp_tx_ext_desc_pool->desc_pages.num_pages) {
327*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
328*5113495bSYour Name "ext desc page alloc fail");
329*5113495bSYour Name return QDF_STATUS_E_NOMEM;
330*5113495bSYour Name }
331*5113495bSYour Name
332*5113495bSYour Name /*
333*5113495bSYour Name * Cacheable ext descriptor link alloc
334*5113495bSYour Name * This structure also large size already
335*5113495bSYour Name * single element is 24bytes, 2K elements are 48Kbytes
336*5113495bSYour Name * Have to alloc multi page cacheable memory
337*5113495bSYour Name */
338*5113495bSYour Name dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
339*5113495bSYour Name &dp_tx_ext_desc_pool->desc_link_pages,
340*5113495bSYour Name link_elem_size, num_elem, 0, true);
341*5113495bSYour Name
342*5113495bSYour Name if (!dp_tx_ext_desc_pool->desc_link_pages.num_pages) {
343*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
344*5113495bSYour Name "ext link desc page alloc fail");
345*5113495bSYour Name status = QDF_STATUS_E_NOMEM;
346*5113495bSYour Name goto free_ext_desc;
347*5113495bSYour Name }
348*5113495bSYour Name
349*5113495bSYour Name return QDF_STATUS_SUCCESS;
350*5113495bSYour Name
351*5113495bSYour Name free_ext_desc:
352*5113495bSYour Name dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
353*5113495bSYour Name &dp_tx_ext_desc_pool->desc_pages,
354*5113495bSYour Name memctx, false);
355*5113495bSYour Name return status;
356*5113495bSYour Name }
357*5113495bSYour Name
dp_tx_ext_desc_pool_alloc(struct dp_soc * soc,uint8_t num_pool,uint32_t num_elem)358*5113495bSYour Name QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
359*5113495bSYour Name uint32_t num_elem)
360*5113495bSYour Name {
361*5113495bSYour Name QDF_STATUS status;
362*5113495bSYour Name uint8_t pool_id, count;
363*5113495bSYour Name
364*5113495bSYour Name for (pool_id = 0; pool_id < num_pool; pool_id++) {
365*5113495bSYour Name status = dp_tx_ext_desc_pool_alloc_by_id(soc, num_elem, pool_id);
366*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status)) {
367*5113495bSYour Name dp_err("failed to allocate tx ext desc pool %d", pool_id);
368*5113495bSYour Name goto free_ext_desc_pool;
369*5113495bSYour Name }
370*5113495bSYour Name }
371*5113495bSYour Name
372*5113495bSYour Name return QDF_STATUS_SUCCESS;
373*5113495bSYour Name
374*5113495bSYour Name free_ext_desc_pool:
375*5113495bSYour Name for (count = 0; count < pool_id; count++)
376*5113495bSYour Name dp_tx_ext_desc_pool_free_by_id(soc, count);
377*5113495bSYour Name
378*5113495bSYour Name return status;
379*5113495bSYour Name }
380*5113495bSYour Name
dp_tx_ext_desc_pool_init_by_id(struct dp_soc * soc,uint32_t num_elem,uint8_t pool_id)381*5113495bSYour Name QDF_STATUS dp_tx_ext_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
382*5113495bSYour Name uint8_t pool_id)
383*5113495bSYour Name {
384*5113495bSYour Name uint32_t i;
385*5113495bSYour Name struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
386*5113495bSYour Name struct qdf_mem_dma_page_t *page_info;
387*5113495bSYour Name struct qdf_mem_multi_page_t *pages;
388*5113495bSYour Name struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
389*5113495bSYour Name QDF_STATUS status;
390*5113495bSYour Name
391*5113495bSYour Name /* link tx descriptors into a freelist */
392*5113495bSYour Name dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
393*5113495bSYour Name soc->tx_ext_desc[pool_id].elem_size =
394*5113495bSYour Name HAL_TX_EXT_DESC_WITH_META_DATA;
395*5113495bSYour Name soc->tx_ext_desc[pool_id].link_elem_size =
396*5113495bSYour Name sizeof(struct dp_tx_ext_desc_elem_s);
397*5113495bSYour Name soc->tx_ext_desc[pool_id].elem_count = num_elem;
398*5113495bSYour Name
399*5113495bSYour Name dp_tx_ext_desc_pool->freelist = (struct dp_tx_ext_desc_elem_s *)
400*5113495bSYour Name *dp_tx_ext_desc_pool->desc_link_pages.cacheable_pages;
401*5113495bSYour Name
402*5113495bSYour Name if (qdf_mem_multi_page_link(soc->osdev,
403*5113495bSYour Name &dp_tx_ext_desc_pool->desc_link_pages,
404*5113495bSYour Name dp_tx_ext_desc_pool->link_elem_size,
405*5113495bSYour Name dp_tx_ext_desc_pool->elem_count,
406*5113495bSYour Name true)) {
407*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
408*5113495bSYour Name "ext link desc page linking fail");
409*5113495bSYour Name status = QDF_STATUS_E_FAULT;
410*5113495bSYour Name goto fail;
411*5113495bSYour Name }
412*5113495bSYour Name
413*5113495bSYour Name /* Assign coherent memory pointer into linked free list */
414*5113495bSYour Name pages = &dp_tx_ext_desc_pool->desc_pages;
415*5113495bSYour Name page_info = dp_tx_ext_desc_pool->desc_pages.dma_pages;
416*5113495bSYour Name c_elem = dp_tx_ext_desc_pool->freelist;
417*5113495bSYour Name p_elem = c_elem;
418*5113495bSYour Name for (i = 0; i < dp_tx_ext_desc_pool->elem_count; i++) {
419*5113495bSYour Name if (!(i % pages->num_element_per_page)) {
420*5113495bSYour Name /**
421*5113495bSYour Name * First element for new page,
422*5113495bSYour Name * should point next page
423*5113495bSYour Name */
424*5113495bSYour Name if (!pages->dma_pages->page_v_addr_start) {
425*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP,
426*5113495bSYour Name QDF_TRACE_LEVEL_ERROR,
427*5113495bSYour Name "link over flow");
428*5113495bSYour Name status = QDF_STATUS_E_FAULT;
429*5113495bSYour Name goto fail;
430*5113495bSYour Name }
431*5113495bSYour Name
432*5113495bSYour Name c_elem->vaddr =
433*5113495bSYour Name (void *)page_info->page_v_addr_start;
434*5113495bSYour Name c_elem->paddr = page_info->page_p_addr;
435*5113495bSYour Name page_info++;
436*5113495bSYour Name } else {
437*5113495bSYour Name c_elem->vaddr = (void *)(p_elem->vaddr +
438*5113495bSYour Name dp_tx_ext_desc_pool->elem_size);
439*5113495bSYour Name c_elem->paddr = (p_elem->paddr +
440*5113495bSYour Name dp_tx_ext_desc_pool->elem_size);
441*5113495bSYour Name }
442*5113495bSYour Name p_elem = c_elem;
443*5113495bSYour Name c_elem = c_elem->next;
444*5113495bSYour Name if (!c_elem)
445*5113495bSYour Name break;
446*5113495bSYour Name }
447*5113495bSYour Name dp_tx_ext_desc_pool->num_free = num_elem;
448*5113495bSYour Name qdf_spinlock_create(&dp_tx_ext_desc_pool->lock);
449*5113495bSYour Name
450*5113495bSYour Name return QDF_STATUS_SUCCESS;
451*5113495bSYour Name
452*5113495bSYour Name fail:
453*5113495bSYour Name return status;
454*5113495bSYour Name }
455*5113495bSYour Name
dp_tx_ext_desc_pool_init(struct dp_soc * soc,uint8_t num_pool,uint32_t num_elem)456*5113495bSYour Name QDF_STATUS dp_tx_ext_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
457*5113495bSYour Name uint32_t num_elem)
458*5113495bSYour Name {
459*5113495bSYour Name uint8_t pool_id;
460*5113495bSYour Name QDF_STATUS status;
461*5113495bSYour Name
462*5113495bSYour Name for (pool_id = 0; pool_id < num_pool; pool_id++) {
463*5113495bSYour Name status = dp_tx_ext_desc_pool_init_by_id(soc, num_elem, pool_id);
464*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status)) {
465*5113495bSYour Name dp_err("failed to init ext desc pool %d", pool_id);
466*5113495bSYour Name goto fail;
467*5113495bSYour Name }
468*5113495bSYour Name }
469*5113495bSYour Name
470*5113495bSYour Name return QDF_STATUS_SUCCESS;
471*5113495bSYour Name fail:
472*5113495bSYour Name return status;
473*5113495bSYour Name }
474*5113495bSYour Name
dp_tx_ext_desc_pool_free_by_id(struct dp_soc * soc,uint8_t pool_id)475*5113495bSYour Name void dp_tx_ext_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
476*5113495bSYour Name {
477*5113495bSYour Name struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
478*5113495bSYour Name qdf_dma_context_t memctx = 0;
479*5113495bSYour Name
480*5113495bSYour Name dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
481*5113495bSYour Name memctx = qdf_get_dma_mem_context(dp_tx_ext_desc_pool, memctx);
482*5113495bSYour Name
483*5113495bSYour Name dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_LINK_TYPE,
484*5113495bSYour Name &dp_tx_ext_desc_pool->desc_link_pages,
485*5113495bSYour Name 0, true);
486*5113495bSYour Name
487*5113495bSYour Name dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_EXT_DESC_TYPE,
488*5113495bSYour Name &dp_tx_ext_desc_pool->desc_pages,
489*5113495bSYour Name memctx, false);
490*5113495bSYour Name }
491*5113495bSYour Name
dp_tx_ext_desc_pool_free(struct dp_soc * soc,uint8_t num_pool)492*5113495bSYour Name void dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
493*5113495bSYour Name {
494*5113495bSYour Name uint8_t pool_id;
495*5113495bSYour Name
496*5113495bSYour Name for (pool_id = 0; pool_id < num_pool; pool_id++)
497*5113495bSYour Name dp_tx_ext_desc_pool_free_by_id(soc, pool_id);
498*5113495bSYour Name }
499*5113495bSYour Name
dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc * soc,uint8_t pool_id)500*5113495bSYour Name void dp_tx_ext_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
501*5113495bSYour Name {
502*5113495bSYour Name struct dp_tx_ext_desc_pool_s *dp_tx_ext_desc_pool;
503*5113495bSYour Name
504*5113495bSYour Name dp_tx_ext_desc_pool = &((soc)->tx_ext_desc[pool_id]);
505*5113495bSYour Name qdf_spinlock_destroy(&dp_tx_ext_desc_pool->lock);
506*5113495bSYour Name }
507*5113495bSYour Name
dp_tx_ext_desc_pool_deinit(struct dp_soc * soc,uint8_t num_pool)508*5113495bSYour Name void dp_tx_ext_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
509*5113495bSYour Name {
510*5113495bSYour Name uint8_t pool_id;
511*5113495bSYour Name
512*5113495bSYour Name for (pool_id = 0; pool_id < num_pool; pool_id++)
513*5113495bSYour Name dp_tx_ext_desc_pool_deinit_by_id(soc, pool_id);
514*5113495bSYour Name }
515*5113495bSYour Name
516*5113495bSYour Name #if defined(FEATURE_TSO)
dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc * soc,uint32_t num_elem,uint8_t pool_id)517*5113495bSYour Name QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
518*5113495bSYour Name uint8_t pool_id)
519*5113495bSYour Name {
520*5113495bSYour Name struct dp_tx_tso_seg_pool_s *tso_desc_pool;
521*5113495bSYour Name uint32_t desc_size;
522*5113495bSYour Name
523*5113495bSYour Name desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
524*5113495bSYour Name
525*5113495bSYour Name tso_desc_pool = &soc->tx_tso_desc[pool_id];
526*5113495bSYour Name tso_desc_pool->num_free = 0;
527*5113495bSYour Name dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_DESC_TYPE,
528*5113495bSYour Name &tso_desc_pool->desc_pages,
529*5113495bSYour Name desc_size, num_elem, 0, true);
530*5113495bSYour Name if (!tso_desc_pool->desc_pages.num_pages) {
531*5113495bSYour Name dp_err("Multi page alloc fail, tx desc");
532*5113495bSYour Name return QDF_STATUS_E_NOMEM;
533*5113495bSYour Name }
534*5113495bSYour Name
535*5113495bSYour Name return QDF_STATUS_SUCCESS;
536*5113495bSYour Name }
537*5113495bSYour Name
dp_tx_tso_desc_pool_alloc(struct dp_soc * soc,uint8_t num_pool,uint32_t num_elem)538*5113495bSYour Name QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
539*5113495bSYour Name uint32_t num_elem)
540*5113495bSYour Name {
541*5113495bSYour Name uint32_t pool_id, i;
542*5113495bSYour Name QDF_STATUS status;
543*5113495bSYour Name
544*5113495bSYour Name for (pool_id = 0; pool_id < num_pool; pool_id++) {
545*5113495bSYour Name status = dp_tx_tso_desc_pool_alloc_by_id(soc, num_elem,
546*5113495bSYour Name pool_id);
547*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status)) {
548*5113495bSYour Name dp_err("failed to allocate TSO desc pool %d", pool_id);
549*5113495bSYour Name goto fail;
550*5113495bSYour Name }
551*5113495bSYour Name }
552*5113495bSYour Name
553*5113495bSYour Name return QDF_STATUS_SUCCESS;
554*5113495bSYour Name
555*5113495bSYour Name fail:
556*5113495bSYour Name for (i = 0; i < pool_id; i++)
557*5113495bSYour Name dp_tx_tso_desc_pool_free_by_id(soc, i);
558*5113495bSYour Name
559*5113495bSYour Name return QDF_STATUS_E_NOMEM;
560*5113495bSYour Name }
561*5113495bSYour Name
dp_tx_tso_desc_pool_free_by_id(struct dp_soc * soc,uint8_t pool_id)562*5113495bSYour Name void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
563*5113495bSYour Name {
564*5113495bSYour Name struct dp_tx_tso_seg_pool_s *tso_desc_pool;
565*5113495bSYour Name
566*5113495bSYour Name tso_desc_pool = &soc->tx_tso_desc[pool_id];
567*5113495bSYour Name dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_DESC_TYPE,
568*5113495bSYour Name &tso_desc_pool->desc_pages,
569*5113495bSYour Name 0, true);
570*5113495bSYour Name }
571*5113495bSYour Name
dp_tx_tso_desc_pool_free(struct dp_soc * soc,uint8_t num_pool)572*5113495bSYour Name void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
573*5113495bSYour Name {
574*5113495bSYour Name uint32_t pool_id;
575*5113495bSYour Name
576*5113495bSYour Name for (pool_id = 0; pool_id < num_pool; pool_id++)
577*5113495bSYour Name dp_tx_tso_desc_pool_free_by_id(soc, pool_id);
578*5113495bSYour Name }
579*5113495bSYour Name
dp_tx_tso_desc_pool_init_by_id(struct dp_soc * soc,uint32_t num_elem,uint8_t pool_id)580*5113495bSYour Name QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
581*5113495bSYour Name uint8_t pool_id)
582*5113495bSYour Name {
583*5113495bSYour Name struct dp_tx_tso_seg_pool_s *tso_desc_pool;
584*5113495bSYour Name uint32_t desc_size;
585*5113495bSYour Name
586*5113495bSYour Name desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
587*5113495bSYour Name
588*5113495bSYour Name tso_desc_pool = &soc->tx_tso_desc[pool_id];
589*5113495bSYour Name
590*5113495bSYour Name if (qdf_mem_multi_page_link(soc->osdev,
591*5113495bSYour Name &tso_desc_pool->desc_pages,
592*5113495bSYour Name desc_size,
593*5113495bSYour Name num_elem, true)) {
594*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
595*5113495bSYour Name "invalid tso desc allocation - overflow num link");
596*5113495bSYour Name return QDF_STATUS_E_FAULT;
597*5113495bSYour Name }
598*5113495bSYour Name
599*5113495bSYour Name tso_desc_pool->freelist = (struct qdf_tso_seg_elem_t *)
600*5113495bSYour Name *tso_desc_pool->desc_pages.cacheable_pages;
601*5113495bSYour Name tso_desc_pool->num_free = num_elem;
602*5113495bSYour Name
603*5113495bSYour Name TSO_DEBUG("Number of free descriptors: %u\n",
604*5113495bSYour Name tso_desc_pool->num_free);
605*5113495bSYour Name tso_desc_pool->pool_size = num_elem;
606*5113495bSYour Name qdf_spinlock_create(&tso_desc_pool->lock);
607*5113495bSYour Name
608*5113495bSYour Name return QDF_STATUS_SUCCESS;
609*5113495bSYour Name }
610*5113495bSYour Name
dp_tx_tso_desc_pool_init(struct dp_soc * soc,uint8_t num_pool,uint32_t num_elem)611*5113495bSYour Name QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
612*5113495bSYour Name uint32_t num_elem)
613*5113495bSYour Name {
614*5113495bSYour Name QDF_STATUS status;
615*5113495bSYour Name uint32_t pool_id;
616*5113495bSYour Name
617*5113495bSYour Name for (pool_id = 0; pool_id < num_pool; pool_id++) {
618*5113495bSYour Name status = dp_tx_tso_desc_pool_init_by_id(soc, num_elem,
619*5113495bSYour Name pool_id);
620*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status)) {
621*5113495bSYour Name dp_err("failed to initialise TSO desc pool %d", pool_id);
622*5113495bSYour Name return status;
623*5113495bSYour Name }
624*5113495bSYour Name }
625*5113495bSYour Name
626*5113495bSYour Name return QDF_STATUS_SUCCESS;
627*5113495bSYour Name }
628*5113495bSYour Name
/**
 * dp_tx_tso_desc_pool_deinit_by_id() - deinitialize one TSO segment pool
 * @soc: DP SoC handle
 * @pool_id: index of the pool to deinitialize
 *
 * Resets the freelist and counters under the pool lock, then destroys
 * the lock. A pool that was never initialized (pool_size == 0) is skipped.
 */
void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_seg_pool_s *pool = &soc->tx_tso_desc[pool_id];

	if (!pool->pool_size)
		return;

	qdf_spin_lock_bh(&pool->lock);
	pool->freelist = NULL;
	pool->num_free = 0;
	pool->pool_size = 0;
	qdf_spin_unlock_bh(&pool->lock);
	qdf_spinlock_destroy(&pool->lock);
}
644*5113495bSYour Name
/**
 * dp_tx_tso_desc_pool_deinit() - deinitialize all TSO segment pools
 * @soc: DP SoC handle
 * @num_pool: number of pools to deinitialize
 */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t id = 0;

	while (id < num_pool) {
		dp_tx_tso_desc_pool_deinit_by_id(soc, id);
		id++;
	}
}
652*5113495bSYour Name
dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc * soc,uint32_t num_elem,uint8_t pool_id)653*5113495bSYour Name QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
654*5113495bSYour Name uint32_t num_elem,
655*5113495bSYour Name uint8_t pool_id)
656*5113495bSYour Name {
657*5113495bSYour Name struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
658*5113495bSYour Name uint32_t desc_size;
659*5113495bSYour Name
660*5113495bSYour Name desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
661*5113495bSYour Name
662*5113495bSYour Name tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
663*5113495bSYour Name tso_num_seg_pool->num_free = 0;
664*5113495bSYour Name dp_desc_multi_pages_mem_alloc(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
665*5113495bSYour Name &tso_num_seg_pool->desc_pages,
666*5113495bSYour Name desc_size,
667*5113495bSYour Name num_elem, 0, true);
668*5113495bSYour Name
669*5113495bSYour Name if (!tso_num_seg_pool->desc_pages.num_pages) {
670*5113495bSYour Name dp_err("Multi page alloc fail, tso_num_seg_pool");
671*5113495bSYour Name return QDF_STATUS_E_NOMEM;
672*5113495bSYour Name }
673*5113495bSYour Name
674*5113495bSYour Name return QDF_STATUS_SUCCESS;
675*5113495bSYour Name }
676*5113495bSYour Name
dp_tx_tso_num_seg_pool_alloc(struct dp_soc * soc,uint8_t num_pool,uint32_t num_elem)677*5113495bSYour Name QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
678*5113495bSYour Name uint32_t num_elem)
679*5113495bSYour Name {
680*5113495bSYour Name uint32_t pool_id, i;
681*5113495bSYour Name QDF_STATUS status;
682*5113495bSYour Name
683*5113495bSYour Name for (pool_id = 0; pool_id < num_pool; pool_id++) {
684*5113495bSYour Name status = dp_tx_tso_num_seg_pool_alloc_by_id(soc, num_elem,
685*5113495bSYour Name pool_id);
686*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status)) {
687*5113495bSYour Name dp_err("failed to allocate TSO num seg pool %d", pool_id);
688*5113495bSYour Name goto fail;
689*5113495bSYour Name }
690*5113495bSYour Name }
691*5113495bSYour Name
692*5113495bSYour Name return QDF_STATUS_SUCCESS;
693*5113495bSYour Name
694*5113495bSYour Name fail:
695*5113495bSYour Name for (i = 0; i < pool_id; i++)
696*5113495bSYour Name dp_tx_tso_num_seg_pool_free_by_id(soc, pool_id);
697*5113495bSYour Name
698*5113495bSYour Name return QDF_STATUS_E_NOMEM;
699*5113495bSYour Name }
700*5113495bSYour Name
/**
 * dp_tx_tso_num_seg_pool_free_by_id() - free one TSO num-seg pool
 * @soc: DP SoC handle
 * @pool_id: index of the pool to free
 */
void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *pool = &soc->tx_tso_num_seg[pool_id];

	dp_desc_multi_pages_mem_free(soc, QDF_DP_TX_TSO_NUM_SEG_TYPE,
				     &pool->desc_pages, 0, true);
}
710*5113495bSYour Name
/**
 * dp_tx_tso_num_seg_pool_free() - free all TSO num-seg pools
 * @soc: DP SoC handle
 * @num_pool: number of pools to free
 */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t id = 0;

	while (id < num_pool) {
		dp_tx_tso_num_seg_pool_free_by_id(soc, id);
		id++;
	}
}
718*5113495bSYour Name
719*5113495bSYour Name QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc * soc,uint32_t num_elem,uint8_t pool_id)720*5113495bSYour Name dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
721*5113495bSYour Name uint8_t pool_id)
722*5113495bSYour Name {
723*5113495bSYour Name struct dp_tx_tso_num_seg_pool_s *tso_num_seg_pool;
724*5113495bSYour Name uint32_t desc_size;
725*5113495bSYour Name
726*5113495bSYour Name desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
727*5113495bSYour Name tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
728*5113495bSYour Name
729*5113495bSYour Name if (qdf_mem_multi_page_link(soc->osdev,
730*5113495bSYour Name &tso_num_seg_pool->desc_pages,
731*5113495bSYour Name desc_size,
732*5113495bSYour Name num_elem, true)) {
733*5113495bSYour Name QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
734*5113495bSYour Name "invalid tso desc allocation - overflow num link");
735*5113495bSYour Name return QDF_STATUS_E_FAULT;
736*5113495bSYour Name }
737*5113495bSYour Name
738*5113495bSYour Name tso_num_seg_pool->freelist = (struct qdf_tso_num_seg_elem_t *)
739*5113495bSYour Name *tso_num_seg_pool->desc_pages.cacheable_pages;
740*5113495bSYour Name tso_num_seg_pool->num_free = num_elem;
741*5113495bSYour Name tso_num_seg_pool->num_seg_pool_size = num_elem;
742*5113495bSYour Name
743*5113495bSYour Name qdf_spinlock_create(&tso_num_seg_pool->lock);
744*5113495bSYour Name
745*5113495bSYour Name return QDF_STATUS_SUCCESS;
746*5113495bSYour Name }
747*5113495bSYour Name
dp_tx_tso_num_seg_pool_init(struct dp_soc * soc,uint8_t num_pool,uint32_t num_elem)748*5113495bSYour Name QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
749*5113495bSYour Name uint32_t num_elem)
750*5113495bSYour Name {
751*5113495bSYour Name uint32_t pool_id;
752*5113495bSYour Name QDF_STATUS status;
753*5113495bSYour Name
754*5113495bSYour Name for (pool_id = 0; pool_id < num_pool; pool_id++) {
755*5113495bSYour Name status = dp_tx_tso_num_seg_pool_init_by_id(soc, num_elem,
756*5113495bSYour Name pool_id);
757*5113495bSYour Name if (QDF_IS_STATUS_ERROR(status)) {
758*5113495bSYour Name dp_err("failed to initialise TSO num seg pool %d", pool_id);
759*5113495bSYour Name return status;
760*5113495bSYour Name }
761*5113495bSYour Name }
762*5113495bSYour Name
763*5113495bSYour Name return QDF_STATUS_SUCCESS;
764*5113495bSYour Name }
765*5113495bSYour Name
/**
 * dp_tx_tso_num_seg_pool_deinit_by_id() - deinitialize one TSO num-seg pool
 * @soc: DP SoC handle
 * @pool_id: index of the pool to deinitialize
 *
 * Resets the freelist and counters under the pool lock, then destroys
 * the lock. A pool that was never initialized is skipped.
 */
void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
	struct dp_tx_tso_num_seg_pool_s *pool = &soc->tx_tso_num_seg[pool_id];

	if (!pool->num_seg_pool_size)
		return;

	qdf_spin_lock_bh(&pool->lock);
	pool->freelist = NULL;
	pool->num_free = 0;
	pool->num_seg_pool_size = 0;
	qdf_spin_unlock_bh(&pool->lock);
	qdf_spinlock_destroy(&pool->lock);
}
781*5113495bSYour Name
/**
 * dp_tx_tso_num_seg_pool_deinit() - deinitialize all TSO num-seg pools
 * @soc: DP SoC handle
 * @num_pool: number of pools to deinitialize
 */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
	uint32_t id = 0;

	while (id < num_pool) {
		dp_tx_tso_num_seg_pool_deinit_by_id(soc, id);
		id++;
	}
}
789*5113495bSYour Name #else
/* Stub for the DESC_PARTITION build: no per-pool TSO desc allocation needed */
QDF_STATUS dp_tx_tso_desc_pool_alloc_by_id(struct dp_soc *soc, uint32_t num_elem,
					   uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}
795*5113495bSYour Name
/* Stub for the DESC_PARTITION build: no TSO desc pool allocation needed */
QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
				     uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}
801*5113495bSYour Name
/* Stub for the DESC_PARTITION build: no per-pool TSO desc init needed */
QDF_STATUS dp_tx_tso_desc_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
					  uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}
807*5113495bSYour Name
/* Stub for the DESC_PARTITION build: no TSO desc pool init needed */
QDF_STATUS dp_tx_tso_desc_pool_init(struct dp_soc *soc, uint8_t num_pool,
				    uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}
813*5113495bSYour Name
/* Stub for the DESC_PARTITION build: nothing to free */
void dp_tx_tso_desc_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}
817*5113495bSYour Name
/* Stub for the DESC_PARTITION build: nothing to free */
void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}
821*5113495bSYour Name
/* Stub for the DESC_PARTITION build: nothing to deinitialize */
void dp_tx_tso_desc_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}
825*5113495bSYour Name
/* Stub for the DESC_PARTITION build: nothing to deinitialize */
void dp_tx_tso_desc_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
829*5113495bSYour Name
/* Stub for the DESC_PARTITION build: no per-pool num-seg allocation needed */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc_by_id(struct dp_soc *soc,
					      uint32_t num_elem,
					      uint8_t pool_id)
{
	return QDF_STATUS_SUCCESS;
}
836*5113495bSYour Name
/* Stub for the DESC_PARTITION build: no num-seg pool allocation needed */
QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t num_pool,
					uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}
842*5113495bSYour Name
/* Stub for the DESC_PARTITION build: nothing to free */
void dp_tx_tso_num_seg_pool_free_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}
846*5113495bSYour Name
/* Stub for the DESC_PARTITION build: nothing to free */
void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t num_pool)
{
}
850*5113495bSYour Name
851*5113495bSYour Name QDF_STATUS
dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc * soc,uint32_t num_elem,uint8_t pool_id)852*5113495bSYour Name dp_tx_tso_num_seg_pool_init_by_id(struct dp_soc *soc, uint32_t num_elem,
853*5113495bSYour Name uint8_t pool_id)
854*5113495bSYour Name {
855*5113495bSYour Name return QDF_STATUS_SUCCESS;
856*5113495bSYour Name }
857*5113495bSYour Name
/* Stub for the DESC_PARTITION build: no num-seg pool init needed */
QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t num_pool,
				       uint32_t num_elem)
{
	return QDF_STATUS_SUCCESS;
}
863*5113495bSYour Name
/* Stub for the DESC_PARTITION build: nothing to deinitialize */
void dp_tx_tso_num_seg_pool_deinit_by_id(struct dp_soc *soc, uint8_t pool_id)
{
}
867*5113495bSYour Name
/* Stub for the DESC_PARTITION build: nothing to deinitialize */
void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t num_pool)
{
}
871*5113495bSYour Name #endif
872