1 /*
2 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #ifdef IPA_OFFLOAD
19
20 #include <wlan_ipa_ucfg_api.h>
21 #include <wlan_ipa_core.h>
22 #include <qdf_ipa_wdi3.h>
23 #include <qdf_types.h>
24 #include <qdf_lock.h>
25 #include <hal_hw_headers.h>
26 #include <hal_api.h>
27 #include <hal_reo.h>
28 #include <hif.h>
29 #include <htt.h>
30 #include <wdi_event.h>
31 #include <queue.h>
32 #include "dp_types.h"
33 #include "dp_htt.h"
34 #include "dp_tx.h"
35 #include "dp_rx.h"
36 #include "dp_ipa.h"
37 #include "dp_internal.h"
38 #ifdef WIFI_MONITOR_SUPPORT
39 #include "dp_mon.h"
40 #endif
41 #ifdef FEATURE_WDS
42 #include "dp_txrx_wds.h"
43 #endif
44 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL
45 #include <pld_common.h>
46 #endif
47
48 /* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
49 #define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT (2048)
50
51 /* WAR for IPA_OFFLOAD case. In some cases, its observed that WBM tries to
52 * release a buffer into WBM2SW RELEASE ring for IPA, and the ring is full.
53 * This causes back pressure, resulting in a FW crash.
54 * By leaving some entries with no buffer attached, WBM will be able to write
55 * to the ring, and from dumps we can figure out the buffer which is causing
56 * this issue.
57 */
58 #define DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES 16
59
/**
 * struct dp_ipa_reo_remap_record - history for dp ipa reo remaps
 * @timestamp: Timestamp when remap occurs
 * @ix0_reg: reo destination ring IX0 value
 * @ix2_reg: reo destination ring IX2 value
 * @ix3_reg: reo destination ring IX3 value
 */
struct dp_ipa_reo_remap_record {
	uint64_t timestamp;	/* qdf_get_log_timestamp() at remap time */
	uint32_t ix0_reg;	/* snapshot of REO destination IX0 register */
	uint32_t ix2_reg;	/* snapshot of REO destination IX2 register */
	uint32_t ix3_reg;	/* snapshot of REO destination IX3 register */
};
73
74 #define WLAN_IPA_AST_META_DATA_MASK htonl(0x000000FF)
75 #define WLAN_IPA_META_DATA_MASK htonl(0x00FF0000)
76
77 #define REO_REMAP_HISTORY_SIZE 32
78
79 struct dp_ipa_reo_remap_record dp_ipa_reo_remap_history[REO_REMAP_HISTORY_SIZE];
80
81 static qdf_atomic_t dp_ipa_reo_remap_history_index;
/**
 * dp_ipa_reo_remap_record_index_next() - claim the next remap-history slot
 * @index: shared atomic write index for dp_ipa_reo_remap_history
 *
 * Return: slot number in [0, REO_REMAP_HISTORY_SIZE)
 */
static int dp_ipa_reo_remap_record_index_next(qdf_atomic_t *index)
{
	int next = qdf_atomic_inc_return(index);

	/* Rewind the shared counter when it hits the history size so it
	 * does not grow without bound; only the caller that observes the
	 * exact boundary value performs the subtraction.
	 */
	if (next == REO_REMAP_HISTORY_SIZE)
		qdf_atomic_sub(REO_REMAP_HISTORY_SIZE, index);

	/* Modulo keeps the local value in range even if other callers
	 * incremented past the boundary before the subtraction landed.
	 */
	return next % REO_REMAP_HISTORY_SIZE;
}
91
92 /**
93 * dp_ipa_reo_remap_history_add() - Record dp ipa reo remap values
94 * @ix0_val: reo destination ring IX0 value
95 * @ix2_val: reo destination ring IX2 value
96 * @ix3_val: reo destination ring IX3 value
97 *
98 * Return: None
99 */
static void dp_ipa_reo_remap_history_add(uint32_t ix0_val, uint32_t ix2_val,
					 uint32_t ix3_val)
{
	struct dp_ipa_reo_remap_record *rec;
	int slot;

	/* Claim the next circular-history slot, then snapshot the values */
	slot = dp_ipa_reo_remap_record_index_next(
					&dp_ipa_reo_remap_history_index);
	rec = &dp_ipa_reo_remap_history[slot];

	rec->timestamp = qdf_get_log_timestamp();
	rec->ix0_reg = ix0_val;
	rec->ix2_reg = ix2_val;
	rec->ix3_reg = ix3_val;
}
112
/**
 * __dp_ipa_handle_buf_smmu_mapping() - SMMU map/unmap one nbuf for IPA
 * @soc: DP SoC handle
 * @nbuf: network buffer whose frag 0 physical address is (un)mapped
 * @size: mapping size in bytes
 * @create: true to create the SMMU mapping, false to tear it down
 * @func: caller name, recorded by the smmu map/unmap debug helpers
 * @line: caller line, recorded by the smmu map/unmap debug helpers
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_INVAL when no valid
 *	   IPA handle is available. Map/unmap failures hit asserts.
 */
static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
						   qdf_nbuf_t nbuf,
						   uint32_t size,
						   bool create,
						   const char *func,
						   uint32_t line)
{
	qdf_mem_info_t mem_map_table = {0};
	QDF_STATUS ret = QDF_STATUS_SUCCESS;
	qdf_ipa_wdi_hdl_t hdl;

	/* Need to handle the case when one soc will
	 * have multiple pdev(radio's), Currently passing
	 * pdev_id as 0 assuming 1 soc has only 1 radio.
	 */
	hdl = wlan_ipa_get_hdl(soc->ctrl_psoc, 0);
	if (hdl == DP_IPA_HDL_INVALID) {
		dp_err("IPA handle is invalid");
		return QDF_STATUS_E_INVAL;
	}

	/* Describe the buffer's physical region for the SMMU operation */
	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
				 qdf_nbuf_get_frag_paddr(nbuf, 0),
				 size);

	if (create) {
		/* Assert if PA is zero */
		qdf_assert_always(mem_map_table.pa);

		ret = qdf_nbuf_smmu_map_debug(nbuf, hdl, 1, &mem_map_table,
					      func, line);
	} else {
		ret = qdf_nbuf_smmu_unmap_debug(nbuf, hdl, 1, &mem_map_table,
						func, line);
	}
	qdf_assert_always(!ret);

	/* Return status of mapping/unmapping is stored in
	 * mem_map_table.result field, assert if the result
	 * is failure
	 */
	if (create)
		qdf_assert_always(!mem_map_table.result);
	else
		qdf_assert_always(mem_map_table.result >= mem_map_table.size);

	return ret;
}
160
dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc * soc,qdf_nbuf_t nbuf,uint32_t size,bool create,const char * func,uint32_t line)161 QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
162 qdf_nbuf_t nbuf,
163 uint32_t size,
164 bool create, const char *func,
165 uint32_t line)
166 {
167 struct dp_pdev *pdev;
168 int i;
169
170 for (i = 0; i < soc->pdev_count; i++) {
171 pdev = soc->pdev_list[i];
172 if (pdev && dp_monitor_is_configured(pdev))
173 return QDF_STATUS_SUCCESS;
174 }
175
176 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx) ||
177 !qdf_mem_smmu_s1_enabled(soc->osdev))
178 return QDF_STATUS_SUCCESS;
179
180 /*
181 * Even if ipa pipes is disabled, but if it's unmap
182 * operation and nbuf has done ipa smmu map before,
183 * do ipa smmu unmap as well.
184 */
185 if (!(qdf_atomic_read(&soc->ipa_pipes_enabled) &&
186 qdf_atomic_read(&soc->ipa_map_allowed))) {
187 if (!create && qdf_nbuf_is_rx_ipa_smmu_map(nbuf)) {
188 DP_STATS_INC(soc, rx.err.ipa_unmap_no_pipe, 1);
189 } else {
190 return QDF_STATUS_SUCCESS;
191 }
192 }
193
194 if (qdf_unlikely(create == qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
195 if (create) {
196 DP_STATS_INC(soc, rx.err.ipa_smmu_map_dup, 1);
197 } else {
198 DP_STATS_INC(soc, rx.err.ipa_smmu_unmap_dup, 1);
199 }
200 return QDF_STATUS_E_INVAL;
201 }
202
203 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
204
205 return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create,
206 func, line);
207 }
208
__dp_ipa_tx_buf_smmu_mapping(struct dp_soc * soc,struct dp_pdev * pdev,bool create,const char * func,uint32_t line)209 static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
210 struct dp_soc *soc,
211 struct dp_pdev *pdev,
212 bool create,
213 const char *func,
214 uint32_t line)
215 {
216 uint32_t index;
217 QDF_STATUS ret = QDF_STATUS_SUCCESS;
218 uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
219 qdf_nbuf_t nbuf;
220 uint32_t buf_len;
221
222 if (!ipa_is_ready()) {
223 dp_info("IPA is not READY");
224 return 0;
225 }
226
227 for (index = 0; index < tx_buffer_cnt; index++) {
228 nbuf = (qdf_nbuf_t)
229 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index];
230 if (!nbuf)
231 continue;
232 buf_len = qdf_nbuf_get_data_len(nbuf);
233 ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len,
234 create, func, line);
235 }
236
237 return ret;
238 }
239
240 #ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
/* Flag each REO destination ring, under its SRNG lock, so the mapping
 * lock requirement is observed atomically by the ring's consumers.
 */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
	hal_ring_handle_t reo_hdl;
	int i;

	for (i = 0; i < soc->num_reo_dest_rings; i++) {
		reo_hdl = soc->reo_dest_ring[i].hal_srng;
		hal_srng_lock(reo_hdl);
		soc->ipa_reo_ctx_lock_required[i] = lock_required;
		hal_srng_unlock(reo_hdl);
	}
}
254 #else
/* No-op: with QCA_OL_DP_SRNG_LOCK_LESS_ACCESS there is no SRNG lock to
 * coordinate with, so no per-ring flag needs updating.
 */
static void dp_ipa_set_reo_ctx_mapping_lock_required(struct dp_soc *soc,
						     bool lock_required)
{
}
259
260 #endif
261
262 #ifdef RX_DESC_MULTI_PAGE_ALLOC
/**
 * dp_ipa_handle_rx_buf_pool_smmu_mapping() - IPA SMMU map/unmap the whole
 *					      Rx buffer pool
 * @soc: DP SoC handle
 * @pdev: DP pdev whose Rx descriptor pool is walked
 * @create: true to create mappings, false to remove them
 * @func: caller name, for mapping debug records
 * @line: caller line, for mapping debug records
 *
 * Multi-page descriptor pool variant: walks every in-use, still-mapped Rx
 * descriptor and performs the IPA SMMU (un)map for its nbuf.
 *
 * Return: status of the last buffer processed
 */
static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
							 struct dp_pdev *pdev,
							 bool create,
							 const char *func,
							 uint32_t line)
{
	struct rx_desc_pool *rx_pool;
	uint8_t pdev_id;
	uint32_t num_desc, page_id, offset, i;
	uint16_t num_desc_per_page;
	union dp_rx_desc_list_elem_t *rx_desc_elem;
	struct dp_rx_desc *rx_desc;
	qdf_nbuf_t nbuf;
	QDF_STATUS ret = QDF_STATUS_SUCCESS;

	if (!qdf_ipa_is_ready())
		return ret;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return ret;

	pdev_id = pdev->pdev_id;
	rx_pool = &soc->rx_desc_buf[pdev_id];

	/* Lock order: REO ctx flag -> pool lock -> IPA mapping lock;
	 * the unlocks below are in exact reverse order.
	 */
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
	qdf_spin_lock_bh(&rx_pool->lock);
	dp_ipa_rx_buf_smmu_mapping_lock(soc);
	num_desc = rx_pool->pool_size;
	num_desc_per_page = rx_pool->desc_pages.num_element_per_page;
	for (i = 0; i < num_desc; i++) {
		page_id = i / num_desc_per_page;
		offset = i % num_desc_per_page;
		/* Pool pages can disappear during teardown; stop walking */
		if (qdf_unlikely(!(rx_pool->desc_pages.cacheable_pages)))
			break;
		rx_desc_elem = dp_rx_desc_find(page_id, offset, rx_pool);
		rx_desc = &rx_desc_elem->rx_desc;
		/* Only descriptors holding a DMA-mapped buffer matter */
		if ((!(rx_desc->in_use)) || rx_desc->unmapped)
			continue;
		nbuf = rx_desc->nbuf;

		/* Skip (and count) buffers already in the requested state */
		if (qdf_unlikely(create ==
				 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
			if (create) {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_map_dup, 1);
			} else {
				DP_STATS_INC(soc,
					     rx.err.ipa_smmu_unmap_dup, 1);
			}
			continue;
		}
		qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);

		ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf,
						       rx_pool->buf_size,
						       create, func, line);
	}
	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
	qdf_spin_unlock_bh(&rx_pool->lock);
	dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);

	return ret;
}
326 #else
dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc * soc,struct dp_pdev * pdev,bool create,const char * func,uint32_t line)327 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(
328 struct dp_soc *soc,
329 struct dp_pdev *pdev,
330 bool create,
331 const char *func,
332 uint32_t line)
333 {
334 struct rx_desc_pool *rx_pool;
335 uint8_t pdev_id;
336 qdf_nbuf_t nbuf;
337 int i;
338
339 if (!qdf_ipa_is_ready())
340 return QDF_STATUS_SUCCESS;
341
342 if (!qdf_mem_smmu_s1_enabled(soc->osdev))
343 return QDF_STATUS_SUCCESS;
344
345 pdev_id = pdev->pdev_id;
346 rx_pool = &soc->rx_desc_buf[pdev_id];
347
348 dp_ipa_set_reo_ctx_mapping_lock_required(soc, true);
349 qdf_spin_lock_bh(&rx_pool->lock);
350 dp_ipa_rx_buf_smmu_mapping_lock(soc);
351 for (i = 0; i < rx_pool->pool_size; i++) {
352 if ((!(rx_pool->array[i].rx_desc.in_use)) ||
353 rx_pool->array[i].rx_desc.unmapped)
354 continue;
355
356 nbuf = rx_pool->array[i].rx_desc.nbuf;
357
358 if (qdf_unlikely(create ==
359 qdf_nbuf_is_rx_ipa_smmu_map(nbuf))) {
360 if (create) {
361 DP_STATS_INC(soc,
362 rx.err.ipa_smmu_map_dup, 1);
363 } else {
364 DP_STATS_INC(soc,
365 rx.err.ipa_smmu_unmap_dup, 1);
366 }
367 continue;
368 }
369 qdf_nbuf_set_rx_ipa_smmu_map(nbuf, create);
370
371 __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, rx_pool->buf_size,
372 create, func, line);
373 }
374 dp_ipa_rx_buf_smmu_mapping_unlock(soc);
375 qdf_spin_unlock_bh(&rx_pool->lock);
376 dp_ipa_set_reo_ctx_mapping_lock_required(soc, false);
377
378 return QDF_STATUS_SUCCESS;
379 }
380 #endif /* RX_DESC_MULTI_PAGE_ALLOC */
381
dp_ipa_set_smmu_mapped(struct cdp_soc_t * soc_hdl,int val)382 QDF_STATUS dp_ipa_set_smmu_mapped(struct cdp_soc_t *soc_hdl, int val)
383 {
384 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
385
386 qdf_atomic_set(&soc->ipa_map_allowed, val);
387 return QDF_STATUS_SUCCESS;
388 }
389
dp_ipa_get_smmu_mapped(struct cdp_soc_t * soc_hdl)390 int dp_ipa_get_smmu_mapped(struct cdp_soc_t *soc_hdl)
391 {
392 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
393
394 return qdf_atomic_read(&soc->ipa_map_allowed);
395 }
396
dp_ipa_get_shared_mem_info(qdf_device_t osdev,qdf_shared_mem_t * shared_mem,void * cpu_addr,qdf_dma_addr_t dma_addr,uint32_t size)397 static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
398 qdf_shared_mem_t *shared_mem,
399 void *cpu_addr,
400 qdf_dma_addr_t dma_addr,
401 uint32_t size)
402 {
403 qdf_dma_addr_t paddr;
404 int ret;
405
406 shared_mem->vaddr = cpu_addr;
407 qdf_mem_set_dma_size(osdev, &shared_mem->mem_info, size);
408 *qdf_mem_get_dma_addr_ptr(osdev, &shared_mem->mem_info) = dma_addr;
409
410 paddr = qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
411 qdf_mem_set_dma_pa(osdev, &shared_mem->mem_info, paddr);
412
413 ret = qdf_mem_dma_get_sgtable(osdev->dev, &shared_mem->sgtable,
414 shared_mem->vaddr, dma_addr, size);
415 if (ret) {
416 dp_err("Unable to get DMA sgtable");
417 return QDF_STATUS_E_NOMEM;
418 }
419
420 qdf_dma_get_sgtable_dma_addr(&shared_mem->sgtable);
421
422 return QDF_STATUS_SUCCESS;
423 }
424
425 /**
426 * dp_ipa_get_tx_bank_id() - API to get TCL bank id
427 * @soc: dp_soc handle
428 * @bank_id: out parameter for bank id
429 *
430 * Return: QDF_STATUS
431 */
dp_ipa_get_tx_bank_id(struct dp_soc * soc,uint8_t * bank_id)432 static QDF_STATUS dp_ipa_get_tx_bank_id(struct dp_soc *soc, uint8_t *bank_id)
433 {
434 if (soc->arch_ops.ipa_get_bank_id) {
435 *bank_id = soc->arch_ops.ipa_get_bank_id(soc);
436 if (*bank_id < 0) {
437 return QDF_STATUS_E_INVAL;
438 } else {
439 dp_info("bank_id %u", *bank_id);
440 return QDF_STATUS_SUCCESS;
441 }
442 } else {
443 return QDF_STATUS_E_NOSUPPORT;
444 }
445 }
446
447 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
448 defined(CONFIG_IPA_WDI_UNIFIED_API)
/* Program the TCL bank id into the Tx pipe setup info, but only when the
 * arch layer actually provides one; otherwise leave the IPA default.
 */
static void dp_ipa_setup_tx_params_bank_id(struct dp_soc *soc,
					   qdf_ipa_wdi_pipe_setup_info_t *tx)
{
	uint8_t bank_id;

	if (QDF_IS_STATUS_SUCCESS(dp_ipa_get_tx_bank_id(soc, &bank_id)))
		QDF_IPA_WDI_SETUP_INFO_RX_BANK_ID(tx, bank_id);
}
457
/* SMMU variant of dp_ipa_setup_tx_params_bank_id(): program the TCL bank
 * id into the SMMU Tx pipe setup info when the arch layer provides one.
 */
static void
dp_ipa_setup_tx_smmu_params_bank_id(struct dp_soc *soc,
				    qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
	uint8_t bank_id;

	if (QDF_IS_STATUS_SUCCESS(dp_ipa_get_tx_bank_id(soc, &bank_id)))
		QDF_IPA_WDI_SETUP_INFO_SMMU_RX_BANK_ID(tx_smmu, bank_id);
}
467 #else
/* Stubs: bank id programming is unavailable on kernels older than 5.10
 * without the unified IPA WDI API, so these are intentional no-ops.
 */
static inline void
dp_ipa_setup_tx_params_bank_id(struct dp_soc *soc,
			       qdf_ipa_wdi_pipe_setup_info_t *tx)
{
}

static inline void
dp_ipa_setup_tx_smmu_params_bank_id(struct dp_soc *soc,
				    qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
}
479 #endif
480
481 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL
482 static void
dp_ipa_setup_tx_alt_params_pmac_id(struct dp_soc * soc,qdf_ipa_wdi_pipe_setup_info_t * tx)483 dp_ipa_setup_tx_alt_params_pmac_id(struct dp_soc *soc,
484 qdf_ipa_wdi_pipe_setup_info_t *tx)
485 {
486 uint8_t pmac_id = 0;
487
488 /* Set Pmac ID, extract pmac_id from second radio for TX_ALT ring */
489 if (soc->pdev_count > 1)
490 pmac_id = soc->pdev_list[soc->pdev_count - 1]->lmac_id;
491
492 QDF_IPA_WDI_SETUP_INFO_RX_PMAC_ID(tx, pmac_id);
493 }
494
495 static void
dp_ipa_setup_tx_alt_smmu_params_pmac_id(struct dp_soc * soc,qdf_ipa_wdi_pipe_setup_info_smmu_t * tx_smmu)496 dp_ipa_setup_tx_alt_smmu_params_pmac_id(struct dp_soc *soc,
497 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
498 {
499 uint8_t pmac_id = 0;
500
501 /* Set Pmac ID, extract pmac_id from second radio for TX_ALT ring */
502 if (soc->pdev_count > 1)
503 pmac_id = soc->pdev_list[soc->pdev_count - 1]->lmac_id;
504
505 QDF_IPA_WDI_SETUP_INFO_SMMU_RX_PMAC_ID(tx_smmu, pmac_id);
506 }
507
508 static void
dp_ipa_setup_tx_params_pmac_id(struct dp_soc * soc,qdf_ipa_wdi_pipe_setup_info_t * tx)509 dp_ipa_setup_tx_params_pmac_id(struct dp_soc *soc,
510 qdf_ipa_wdi_pipe_setup_info_t *tx)
511 {
512 uint8_t pmac_id;
513
514 pmac_id = soc->pdev_list[0]->lmac_id;
515
516 QDF_IPA_WDI_SETUP_INFO_RX_PMAC_ID(tx, pmac_id);
517 }
518
519 static void
dp_ipa_setup_tx_smmu_params_pmac_id(struct dp_soc * soc,qdf_ipa_wdi_pipe_setup_info_smmu_t * tx_smmu)520 dp_ipa_setup_tx_smmu_params_pmac_id(struct dp_soc *soc,
521 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
522 {
523 uint8_t pmac_id;
524
525 pmac_id = soc->pdev_list[0]->lmac_id;
526
527 QDF_IPA_WDI_SETUP_INFO_SMMU_RX_PMAC_ID(tx_smmu, pmac_id);
528 }
529 #else
/* Stubs: without QCA_IPA_LL_TX_FLOW_CONTROL there is no per-pmac flow
 * control, so pmac id programming is an intentional no-op.
 */
static inline void
dp_ipa_setup_tx_alt_params_pmac_id(struct dp_soc *soc,
				   qdf_ipa_wdi_pipe_setup_info_t *tx)
{
}

static inline void
dp_ipa_setup_tx_alt_smmu_params_pmac_id(struct dp_soc *soc,
					qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
}

static inline void
dp_ipa_setup_tx_params_pmac_id(struct dp_soc *soc,
			       qdf_ipa_wdi_pipe_setup_info_t *tx)
{
}

static inline void
dp_ipa_setup_tx_smmu_params_pmac_id(struct dp_soc *soc,
				    qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
}
553 #endif
554
555 #ifdef IPA_WDI3_TX_TWO_PIPES
dp_ipa_tx_alt_pool_detach(struct dp_soc * soc,struct dp_pdev * pdev)556 static void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
557 {
558 struct dp_ipa_resources *ipa_res;
559 qdf_nbuf_t nbuf;
560 int idx;
561
562 for (idx = 0; idx < soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt; idx++) {
563 nbuf = (qdf_nbuf_t)
564 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx];
565 if (!nbuf)
566 continue;
567
568 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
569 qdf_mem_dp_tx_skb_cnt_dec();
570 qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
571 qdf_nbuf_free(nbuf);
572 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[idx] =
573 (void *)NULL;
574 }
575
576 qdf_mem_free(soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
577 soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
578
579 ipa_res = &pdev->ipa_resource;
580 if (!ipa_res->is_db_ddr_mapped && ipa_res->tx_alt_comp_doorbell_vaddr)
581 iounmap(ipa_res->tx_alt_comp_doorbell_vaddr);
582
583 qdf_mem_free_sgtable(&ipa_res->tx_alt_ring.sgtable);
584 qdf_mem_free_sgtable(&ipa_res->tx_alt_comp_ring.sgtable);
585 }
586
/**
 * dp_ipa_tx_alt_pool_attach() - Allocate and post Tx buffers to the WBM2SW
 *				 release ring used by the alternative IPA
 *				 Tx pipe
 * @soc: DP SoC handle
 *
 * Pre-fills the Tx-alt completion (WBM) ring with DMA-mapped nbufs,
 * simulating H/W as the source-ring producer, and records each buffer in
 * soc->ipa_uc_tx_rsc_alt for later unmap/free.
 *
 * Return: QDF_STATUS_SUCCESS (0) on success, -EINVAL/-ENOMEM on failure
 */
static int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	uint32_t wbm_bm_id;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 * dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx,
						  IPA_TX_ALT_RING_IDX);

	hal_get_srng_params(soc->hal_soc,
			    hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Leave some entries buffer-less as a WBM back-pressure WAR, see
	 * DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES above.
	 */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	/* Bookkeeping array is sized for the full ring, not just the
	 * buffers actually posted.
	 */
	soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	for (tx_buffer_count = 0;
	     tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		/* 256 bytes of headroom, default alignment */
		nbuf = qdf_nbuf_frag_alloc(soc->osdev, alloc_size, 0,
					   256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(
				soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		qdf_nbuf_map_single(soc->osdev, nbuf,
				    QDF_DMA_BIDIRECTIONAL);
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		qdf_mem_dp_tx_skb_cnt_inc();
		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));

		/* Publish the buffer's PA to H/W via the ring descriptor */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry,
					     buffer_paddr, 0, wbm_bm_id);

		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned[
			tx_buffer_count] = (void *)nbuf;
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA TX buffer pool2: %d allocated", tx_buffer_count);
	} else {
		dp_err("Failed to allocate IPA TX buffer pool2");
		qdf_mem_free(
			soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc_alt.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
694
dp_ipa_tx_alt_ring_get_resource(struct dp_pdev * pdev)695 static QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
696 {
697 struct dp_soc *soc = pdev->soc;
698 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
699
700 ipa_res->tx_alt_ring_num_alloc_buffer =
701 (uint32_t)soc->ipa_uc_tx_rsc_alt.alloc_tx_buf_cnt;
702
703 dp_ipa_get_shared_mem_info(
704 soc->osdev, &ipa_res->tx_alt_ring,
705 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
706 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
707 soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);
708
709 dp_ipa_get_shared_mem_info(
710 soc->osdev, &ipa_res->tx_alt_comp_ring,
711 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
712 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
713 soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
714
715 if (!qdf_mem_get_dma_addr(soc->osdev,
716 &ipa_res->tx_alt_comp_ring.mem_info))
717 return QDF_STATUS_E_FAILURE;
718
719 return QDF_STATUS_SUCCESS;
720 }
721
/**
 * dp_ipa_tx_alt_ring_resource_setup() - Record TCL/WBM ring addresses for
 *					 the alternative IPA Tx pipe
 * @soc: DP SoC handle
 *
 * Captures base address/size and head/tail pointer physical addresses of
 * the SW2TCL2 data ring and WBM2SW4 release ring into
 * soc->ipa_uc_tx_rsc_alt so they can later be exported to the IPA driver.
 */
static void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	unsigned long addr_offset, dev_base_paddr;

	/* IPA TCL_DATA Alternative Ring - HAL_SRNG_SW2TCL2 */
	hal_srng = (struct hal_srng *)
		soc->tcl_data_ring[IPA_TX_ALT_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* Ring size in bytes: entries * entry_size (32-bit words) * 4 */
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Alt Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_ring_size);

	/* IPA TX Alternative COMP Ring - HAL_SRNG_WBM2SW4_RELEASE */
	hal_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr =
		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				     hal_srng_to_hal_ring_handle(hal_srng));
	/* WBM is a destination ring from the host's point of view, so the
	 * doorbell exposed to IPA is its tail pointer.
	 */
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX Alt COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_ring_size);
}
793
/**
 * dp_ipa_map_ring_doorbell_paddr() - Map IPA doorbell registers for CPU
 *				      and SMMU access
 * @pdev: DP pdev handle
 *
 * Creates a CPU virtual mapping (phys_to_virt or 4-byte ioremap) for the
 * Tx completion doorbell(s) and, when stage-1 SMMU is enabled, replaces
 * the stored doorbell physical addresses with their SMMU IOVAs from
 * pld_smmu_map(). dp_ipa_unmap_ring_doorbell_paddr() undoes the SMMU
 * mappings in reverse order.
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	uint32_t tx_comp_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	/* DDR-backed doorbells are directly addressable; register-backed
	 * ones need a 4-byte ioremap.
	 */
	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
				phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		ipa_res->tx_comp_doorbell_vaddr =
				ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		/* From here on the *_paddr fields hold IOVAs, not PAs */
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);

		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}

	/* Setup for alternative TX pipe */
	if (!ipa_res->tx_alt_comp_doorbell_paddr)
		return;

	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_alt_comp_doorbell_vaddr =
			phys_to_virt(ipa_res->tx_alt_comp_doorbell_paddr);
	else
		ipa_res->tx_alt_comp_doorbell_vaddr =
			ioremap(ipa_res->tx_alt_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_alt_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->tx_alt_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}
}
845
dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev * pdev)846 static void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
847 {
848 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
849 struct dp_soc *soc = pdev->soc;
850 int ret = 0;
851
852 if (!qdf_mem_smmu_s1_enabled(soc->osdev))
853 return;
854
855 /* Unmap must be in reverse order of map */
856 if (ipa_res->tx_alt_comp_doorbell_paddr) {
857 ret = pld_smmu_unmap(soc->osdev->dev,
858 ipa_res->tx_alt_comp_doorbell_paddr,
859 sizeof(uint32_t));
860 qdf_assert_always(!ret);
861 }
862
863 ret = pld_smmu_unmap(soc->osdev->dev,
864 ipa_res->rx_ready_doorbell_paddr,
865 sizeof(uint32_t));
866 qdf_assert_always(!ret);
867
868 ret = pld_smmu_unmap(soc->osdev->dev,
869 ipa_res->tx_comp_doorbell_paddr,
870 sizeof(uint32_t));
871 qdf_assert_always(!ret);
872 }
873
dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc * soc,struct dp_pdev * pdev,bool create,const char * func,uint32_t line)874 static QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
875 struct dp_pdev *pdev,
876 bool create, const char *func,
877 uint32_t line)
878 {
879 QDF_STATUS ret = QDF_STATUS_SUCCESS;
880 struct ipa_dp_tx_rsc *rsc;
881 uint32_t tx_buffer_cnt;
882 uint32_t buf_len;
883 qdf_nbuf_t nbuf;
884 uint32_t index;
885
886 if (!ipa_is_ready()) {
887 dp_info("IPA is not READY");
888 return QDF_STATUS_SUCCESS;
889 }
890
891 rsc = &soc->ipa_uc_tx_rsc_alt;
892 tx_buffer_cnt = rsc->alloc_tx_buf_cnt;
893
894 for (index = 0; index < tx_buffer_cnt; index++) {
895 nbuf = (qdf_nbuf_t)rsc->tx_buf_pool_vaddr_unaligned[index];
896 if (!nbuf)
897 continue;
898
899 buf_len = qdf_nbuf_get_data_len(nbuf);
900 ret = __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, buf_len,
901 create, func, line);
902 }
903
904 return ret;
905 }
906
/**
 * dp_ipa_wdi_tx_alt_pipe_params() - Fill WDI pipe setup info for the
 *				     non-SMMU alternative Tx pipe
 * @soc: DP SoC handle
 * @ipa_res: per-pdev IPA resources holding the exported ring descriptors
 * @tx: IPA WDI pipe setup info to populate
 */
static void dp_ipa_wdi_tx_alt_pipe_params(struct dp_soc *soc,
					  struct dp_ipa_resources *ipa_res,
					  qdf_ipa_wdi_pipe_setup_info_t *tx)
{
	QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS1;

	/* Transfer ring = WBM completion ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	/* Event ring = TCL data ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_alt_ring_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	dp_ipa_setup_tx_params_bank_id(soc, tx);

	/* Set Pmac ID, extract pmac_id from second radio for TX_ALT ring */
	dp_ipa_setup_tx_alt_params_pmac_id(soc, tx);
}
947
/**
 * dp_ipa_wdi_tx_alt_pipe_smmu_params() - Fill WDI pipe setup info for the
 *					  SMMU-enabled alternative Tx pipe
 * @soc: DP SoC handle
 * @ipa_res: per-pdev IPA resources holding the exported ring descriptors
 * @tx_smmu: IPA WDI SMMU pipe setup info to populate
 *
 * SMMU counterpart of dp_ipa_wdi_tx_alt_pipe_params(): ring bases are
 * passed as sgtables instead of flat DMA addresses.
 */
static void
dp_ipa_wdi_tx_alt_pipe_smmu_params(struct dp_soc *soc,
				   struct dp_ipa_resources *ipa_res,
				   qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu)
{
	QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) = IPA_CLIENT_WLAN2_CONS1;

	/* Transfer ring = WBM completion ring, described by sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;

	/* Event ring = TCL data ring, described by sgtable */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_alt_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_alt_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc_alt.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_alt_ring_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	dp_ipa_setup_tx_smmu_params_bank_id(soc, tx_smmu);

	/* Set Pmac ID, extract pmac_id from second radio for TX_ALT ring */
	dp_ipa_setup_tx_alt_smmu_params_pmac_id(soc, tx_smmu);
}
986
/**
 * dp_ipa_setup_tx_alt_pipe() - Fill IPA connection in-params for the
 *	alternate (second) TX pipe
 * @soc: data path SoC handle
 * @res: IPA resources for the alternate TX rings
 * @in: IPA WDI connection in-params to populate
 *
 * Chooses the SMMU or non-SMMU setup-info variant depending on whether
 * SMMU S1 translation is enabled, then programs the endpoint config
 * common to both: uC WLAN TX header length, NAT bypass, basic mode.
 *
 * Return: none
 */
static void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc,
				     struct dp_ipa_resources *res,
				     qdf_ipa_wdi_conn_in_params_t *in)
{
	qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu = NULL;
	qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
	qdf_ipa_ep_cfg_t *tx_cfg;

	QDF_IPA_WDI_CONN_IN_PARAMS_IS_TX1_USED(in) = true;

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE_SMMU(in);
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
		dp_ipa_wdi_tx_alt_pipe_smmu_params(soc, res, tx_smmu);
	} else {
		tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_ALT_PIPE(in);
		/* Fix: use the non-SMMU EP_CFG accessor here; the SMMU
		 * accessor was previously applied to this plain
		 * qdf_ipa_wdi_pipe_setup_info_t object.
		 */
		tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
		dp_ipa_wdi_tx_alt_pipe_params(soc, res, tx);
	}

	QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
	QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
	QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
}
1015
/* Record the doorbell physical addresses IPA returned at connect time:
 * both TX completion pipes plus the RX ready ring (two-TX-pipe variant).
 */
static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
			       qdf_ipa_wdi_conn_out_params_t *out)
{
	res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
	res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
	res->tx_alt_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_ALT_DB_PA(out);
}
1026
/* Unpack the combined session id: the low bit (per IPA_SESSION_ID_SHIFT)
 * flags a 2.4 GHz interface, the remaining bits carry the vdev id which
 * is placed in the IPA metadata (network byte order, upper 16 bits).
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	bool is_2g_iface = session_id & IPA_SESSION_ID_SHIFT;
	uint8_t vdev_id = session_id >> IPA_SESSION_ID_SHIFT;

	dp_debug("session_id %u is_2g_iface %d", vdev_id, is_2g_iface);

	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_TX1_USED(in) = is_2g_iface;
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(vdev_id << 16);
}
1038
/**
 * dp_ipa_tx_comp_ring_init_hp() - Initialize head pointer of IPA TX comp
 *	ring(s) from the doorbell vaddr returned by IPA
 * @soc: data path SoC handle
 * @res: IPA resources holding the doorbell virtual addresses
 *
 * Return: none
 */
static void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
					struct dp_ipa_resources *res)
{
	struct hal_srng *wbm_srng;

	/* Init first TX comp ring */
	wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;

	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
			     res->tx_comp_doorbell_vaddr);

	/* Init the alternate TX comp ring only when IPA provided its
	 * doorbell (i.e. the second TX pipe is actually connected).
	 */
	if (!res->tx_alt_comp_doorbell_paddr)
		return;

	wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;

	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
			     res->tx_alt_comp_doorbell_vaddr);
}
1061
/**
 * dp_ipa_tx_comp_ring_update_hp_addr() - Ring doorbell to WBM2IPA ring(s)
 *	with the current HW head pointer value
 * @soc: data path SoC handle
 * @res: IPA resources (used to detect whether the alt pipe is connected)
 *
 * Return: none
 */
static void
dp_ipa_tx_comp_ring_update_hp_addr(struct dp_soc *soc,
				   struct dp_ipa_resources *res)
{
	hal_ring_handle_t wbm_srng;

	/* Ring doorbell to WBM2IPA ring with current HW HP value */
	wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_srng_dst_update_hp_addr(soc->hal_soc, wbm_srng);

	/* Skip the alternate ring when the second pipe is not connected */
	if (!res->tx_alt_comp_doorbell_paddr)
		return;

	wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	hal_srng_dst_update_hp_addr(soc->hal_soc, wbm_srng);
}
1078
/**
 * dp_ipa_set_tx_doorbell_paddr() - Point the WBM TX comp ring(s) head
 *	pointer write-back address at the IPA doorbell register(s)
 * @soc: data path SoC handle
 * @ipa_res: IPA resources holding the doorbell physical addresses
 *
 * Return: none
 */
static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
					 struct dp_ipa_resources *ipa_res)
{
	struct hal_srng *wbm_srng;

	wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;

	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
					  ipa_res->tx_comp_doorbell_paddr);

	dp_info("paddr %pK vaddr %pK",
		(void *)ipa_res->tx_comp_doorbell_paddr,
		(void *)ipa_res->tx_comp_doorbell_vaddr);

	/* Setup for alternative TX comp ring, only if connected */
	if (!ipa_res->tx_alt_comp_doorbell_paddr)
		return;

	wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;

	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
					  ipa_res->tx_alt_comp_doorbell_paddr);

	dp_info("paddr %pK vaddr %pK",
		(void *)ipa_res->tx_alt_comp_doorbell_paddr,
		(void *)ipa_res->tx_alt_comp_doorbell_vaddr);
}
1108
1109 #ifdef IPA_SET_RESET_TX_DB_PA
/**
 * dp_ipa_reset_tx_doorbell_pa() - Restore WBM TX comp ring(s) head pointer
 *	write-back address to the SW shadow register (undo IPA doorbell)
 * @soc: data path SoC handle
 * @ipa_res: IPA resources (unused here; kept for the common signature)
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE if a ring
 *	   handle is missing
 */
static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
					      struct dp_ipa_resources *ipa_res)
{
	hal_ring_handle_t wbm_srng;
	qdf_dma_addr_t hp_addr;

	wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	if (!wbm_srng)
		return QDF_STATUS_E_FAILURE;

	hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;

	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);

	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);

	/* Reset alternative TX comp ring */
	wbm_srng = soc->tx_comp_ring[IPA_TX_ALT_COMP_RING_IDX].hal_srng;
	if (!wbm_srng)
		return QDF_STATUS_E_FAILURE;

	hp_addr = soc->ipa_uc_tx_rsc_alt.ipa_wbm_hp_shadow_paddr;

	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);

	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);

	return QDF_STATUS_SUCCESS;
}
1139 #endif /* IPA_SET_RESET_TX_DB_PA */
1140
1141 #else /* !IPA_WDI3_TX_TWO_PIPES */
1142
/* No-op: second TX pipe support (IPA_WDI3_TX_TWO_PIPES) is disabled. */
static inline
void dp_ipa_tx_alt_pool_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
1147
/* No-op: second TX pipe support (IPA_WDI3_TX_TWO_PIPES) is disabled. */
static inline void dp_ipa_tx_alt_ring_resource_setup(struct dp_soc *soc)
{
}
1151
/* No-op: always succeeds when the second TX pipe is disabled. */
static inline int dp_ipa_tx_alt_pool_attach(struct dp_soc *soc)
{
	return 0;
}
1156
/* No-op: always succeeds when the second TX pipe is disabled. */
static inline QDF_STATUS dp_ipa_tx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
1161
/* Map the TX comp doorbell for CPU access and, when SMMU S1 is enabled,
 * replace the TX comp / RX ready doorbell paddrs with their IOVAs so the
 * device can write through the SMMU (single TX pipe variant).
 */
static void dp_ipa_map_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	uint32_t rx_ready_doorbell_dmaaddr;
	uint32_t tx_comp_doorbell_dmaaddr;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	/* DDR-backed doorbell can be reached via the linear map;
	 * otherwise map the 4-byte register window with ioremap.
	 */
	if (ipa_res->is_db_ddr_mapped)
		ipa_res->tx_comp_doorbell_vaddr =
			phys_to_virt(ipa_res->tx_comp_doorbell_paddr);
	else
		ipa_res->tx_comp_doorbell_vaddr =
			ioremap(ipa_res->tx_comp_doorbell_paddr, 4);

	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->tx_comp_doorbell_paddr,
				   &tx_comp_doorbell_dmaaddr,
				   sizeof(uint32_t));
		/* NOTE(review): paddr is overwritten before the map result
		 * is asserted; on failure dmaaddr may be indeterminate —
		 * relies on qdf_assert_always aborting. Confirm intended.
		 */
		ipa_res->tx_comp_doorbell_paddr = tx_comp_doorbell_dmaaddr;
		qdf_assert_always(!ret);

		ret = pld_smmu_map(soc->osdev->dev,
				   ipa_res->rx_ready_doorbell_paddr,
				   &rx_ready_doorbell_dmaaddr,
				   sizeof(uint32_t));
		ipa_res->rx_ready_doorbell_paddr = rx_ready_doorbell_dmaaddr;
		qdf_assert_always(!ret);
	}
}
1193
/* Undo the SMMU doorbell mappings created by
 * dp_ipa_map_ring_doorbell_paddr(); no-op when SMMU S1 is disabled.
 */
static inline void dp_ipa_unmap_ring_doorbell_paddr(struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
	struct dp_soc *soc = pdev->soc;
	int ret = 0;

	if (!qdf_mem_smmu_s1_enabled(soc->osdev))
		return;

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->rx_ready_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);

	ret = pld_smmu_unmap(soc->osdev->dev,
			     ipa_res->tx_comp_doorbell_paddr,
			     sizeof(uint32_t));
	qdf_assert_always(!ret);
}
1213
/* No-op: no alt TX buffers to (un)map when the second pipe is disabled. */
static inline QDF_STATUS dp_ipa_tx_alt_buf_smmu_mapping(struct dp_soc *soc,
							struct dp_pdev *pdev,
							bool create,
							const char *func,
							uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}
1222
/* No-op: the alternate TX pipe is never set up in single-pipe builds. */
static inline
void dp_ipa_setup_tx_alt_pipe(struct dp_soc *soc, struct dp_ipa_resources *res,
			      qdf_ipa_wdi_conn_in_params_t *in)
{
}
1228
/* Record the TX comp and RX ready doorbell physical addresses returned
 * by IPA at connect time (single TX pipe variant).
 */
static void dp_ipa_set_pipe_db(struct dp_ipa_resources *res,
			       qdf_ipa_wdi_conn_out_params_t *out)
{
	res->tx_comp_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(out);
	res->rx_ready_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(out);
}
1237
1238 #ifdef IPA_WDS_EASYMESH_FEATURE
1239 /**
1240 * dp_ipa_setup_iface_session_id() - Pass vdev id to IPA
1241 * @in: ipa in params
1242 * @session_id: vdev id
1243 *
1244 * Pass Vdev id to IPA, IPA metadata order is changed and vdev id
1245 * is stored at higher nibble so, no shift is required.
1246 *
1247 * Return: none
1248 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	/* WDS: metadata order is changed and vdev id sits in the higher
	 * nibble already, so no shift is needed (see kernel-doc above).
	 */
	if (ucfg_ipa_is_wds_enabled())
		QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id);
	else
		QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
}
1257 #else
/* Non-easymesh: vdev id goes into the upper 16 bits of the IPA metadata,
 * in network byte order.
 */
static void dp_ipa_setup_iface_session_id(qdf_ipa_wdi_reg_intf_in_params_t *in,
					  uint8_t session_id)
{
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(in) = htonl(session_id << 16);
}
1263 #endif
1264
/* Initialize the TX comp ring head pointer from the IPA doorbell vaddr
 * (single TX pipe variant).
 */
static inline void dp_ipa_tx_comp_ring_init_hp(struct dp_soc *soc,
					       struct dp_ipa_resources *res)
{
	struct hal_srng *wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;

	hal_srng_dst_init_hp(soc->hal_soc, wbm_srng,
			     res->tx_comp_doorbell_vaddr);
}
1274
/* Ring doorbell to the WBM2IPA ring with the current HW head pointer
 * (single TX pipe variant).
 */
static void
dp_ipa_tx_comp_ring_update_hp_addr(struct dp_soc *soc,
				   struct dp_ipa_resources *res)
{
	hal_ring_handle_t wbm_srng;

	/* Ring doorbell to WBM2IPA ring with current HW HP value */
	wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_srng_dst_update_hp_addr(soc->hal_soc, wbm_srng);
}
1285
/* Point the WBM TX comp ring head pointer write-back address at the IPA
 * doorbell register (single TX pipe variant).
 */
static void dp_ipa_set_tx_doorbell_paddr(struct dp_soc *soc,
					 struct dp_ipa_resources *ipa_res)
{
	struct hal_srng *wbm_srng = (struct hal_srng *)
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;

	hal_srng_dst_set_hp_paddr_confirm(wbm_srng,
					  ipa_res->tx_comp_doorbell_paddr);

	dp_info("paddr %pK vaddr %pK",
		(void *)ipa_res->tx_comp_doorbell_paddr,
		(void *)ipa_res->tx_comp_doorbell_vaddr);
}
1299
1300 #ifdef IPA_SET_RESET_TX_DB_PA
/* Restore the WBM TX comp ring head pointer write-back address to the SW
 * shadow register, undoing dp_ipa_set_tx_doorbell_paddr() (single pipe).
 *
 * Return: QDF_STATUS_SUCCESS, or QDF_STATUS_E_FAILURE if the ring handle
 * is missing.
 */
static QDF_STATUS dp_ipa_reset_tx_doorbell_pa(struct dp_soc *soc,
					      struct dp_ipa_resources *ipa_res)
{
	hal_ring_handle_t wbm_srng =
		soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	qdf_dma_addr_t hp_addr;

	if (!wbm_srng)
		return QDF_STATUS_E_FAILURE;

	hp_addr = soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr;

	hal_srng_dst_set_hp_paddr_confirm((struct hal_srng *)wbm_srng, hp_addr);

	dp_info("Reset WBM HP addr paddr: %pK", (void *)hp_addr);

	return QDF_STATUS_SUCCESS;
}
1319 #endif /* IPA_SET_RESET_TX_DB_PA */
1320
1321 #endif /* IPA_WDI3_TX_TWO_PIPES */
1322
1323 /**
1324 * dp_tx_ipa_uc_detach() - Free autonomy TX resources
1325 * @soc: data path instance
1326 * @pdev: core txrx pdev context
1327 *
1328 * Free allocated TX buffers with WBM SRNG
1329 *
1330 * Return: none
1331 */
dp_tx_ipa_uc_detach(struct dp_soc * soc,struct dp_pdev * pdev)1332 static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
1333 {
1334 int idx;
1335 qdf_nbuf_t nbuf;
1336 struct dp_ipa_resources *ipa_res;
1337
1338 for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
1339 nbuf = (qdf_nbuf_t)
1340 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx];
1341 if (!nbuf)
1342 continue;
1343 qdf_nbuf_unmap_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
1344 qdf_mem_dp_tx_skb_cnt_dec();
1345 qdf_mem_dp_tx_skb_dec(qdf_nbuf_get_end_offset(nbuf));
1346 qdf_nbuf_free(nbuf);
1347 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[idx] =
1348 (void *)NULL;
1349 }
1350
1351 qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
1352 soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
1353
1354 ipa_res = &pdev->ipa_resource;
1355
1356 qdf_mem_free_sgtable(&ipa_res->tx_ring.sgtable);
1357 qdf_mem_free_sgtable(&ipa_res->tx_comp_ring.sgtable);
1358 }
1359
1360 /**
1361 * dp_rx_ipa_uc_detach() - free autonomy RX resources
1362 * @soc: data path instance
1363 * @pdev: core txrx pdev context
1364 *
1365 * This function will detach DP RX into main device context
1366 * will free DP Rx resources.
1367 *
1368 * Return: none
1369 */
static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	/* Release scatter-gather tables of the RX ready/refill rings */
	qdf_mem_free_sgtable(&ipa_res->rx_rdy_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->rx_refill_ring.sgtable);
}
1377
1378 /**
1379 * dp_rx_alt_ipa_uc_detach() - free autonomy RX resources
1380 * @soc: data path instance
1381 * @pdev: core txrx pdev context
1382 *
1383 * This function will detach DP RX into main device context
1384 * will free DP Rx resources.
1385 *
1386 * Return: none
1387 */
1388 #ifdef IPA_WDI3_VLAN_SUPPORT
static void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;

	/* Alt RX (VLAN) rings only exist when the VLAN feature is on */
	if (!wlan_ipa_is_vlan_enabled())
		return;

	qdf_mem_free_sgtable(&ipa_res->rx_alt_rdy_ring.sgtable);
	qdf_mem_free_sgtable(&ipa_res->rx_alt_refill_ring.sgtable);
}
1399 #else
/* No-op: IPA_WDI3_VLAN_SUPPORT disabled, no alternate RX resources. */
static inline
void dp_rx_alt_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{ }
1403 #endif
1404
1405 /**
1406 * dp_ipa_opt_wifi_dp_cleanup() - Cleanup ipa opt wifi dp filter setup
1407 * @soc: data path instance
1408 * @pdev: core txrx pdev context
1409 *
1410 * This function will cleanup filter setup for optional wifi dp.
1411 *
1412 * Return: none
1413 */
1414
1415 #ifdef IPA_OPT_WIFI_DP
dp_ipa_opt_wifi_dp_cleanup(struct dp_soc * soc,struct dp_pdev * pdev)1416 static void dp_ipa_opt_wifi_dp_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
1417 {
1418 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
1419 struct hif_softc *hif = (struct hif_softc *)(hal_soc->hif_handle);
1420 int count = qdf_atomic_read(&hif->opt_wifi_dp_rtpm_cnt);
1421 int i;
1422
1423 for (i = count; i > 0; i--) {
1424 dp_info("opt_dp: cleanup call pcie link down");
1425 dp_ipa_pcie_link_down((struct cdp_soc_t *)soc);
1426 }
1427 }
1428 #else
/* No-op: IPA_OPT_WIFI_DP disabled, nothing to clean up. */
static inline
void dp_ipa_opt_wifi_dp_cleanup(struct dp_soc *soc, struct dp_pdev *pdev)
{
}
1433 #endif
1434
/* Free all IPA uC datapath resources for this pdev: TX buffers/rings,
 * the optional second TX pipe, RX rings, the optional second RX (VLAN)
 * pipe, and any pending opt-wifi-dp state. No-op if IPA is disabled.
 */
int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* TX resource detach */
	dp_tx_ipa_uc_detach(soc, pdev);

	/* Cleanup 2nd TX pipe resources */
	dp_ipa_tx_alt_pool_detach(soc, pdev);

	/* RX resource detach */
	dp_rx_ipa_uc_detach(soc, pdev);

	/* Cleanup 2nd RX pipe resources */
	dp_rx_alt_ipa_uc_detach(soc, pdev);

	dp_ipa_opt_wifi_dp_cleanup(soc, pdev);

	return QDF_STATUS_SUCCESS;	/* success */
}
1456
1457 /**
1458 * dp_tx_ipa_uc_attach() - Allocate autonomy TX resources
1459 * @soc: data path instance
1460 * @pdev: Physical device handle
1461 *
1462 * Allocate TX buffer from non-cacheable memory
1463 * Attach allocated TX buffers with WBM SRNG
1464 *
1465 * Return: int
1466 */
static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	uint32_t tx_buffer_count;
	uint32_t ring_base_align = 8;
	qdf_dma_addr_t buffer_paddr;
	struct hal_srng *wbm_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	struct hal_srng_params srng_params;
	void *ring_entry;
	int num_entries;
	qdf_nbuf_t nbuf;
	int retval = QDF_STATUS_SUCCESS;
	int max_alloc_count = 0;
	uint32_t wbm_bm_id;

	/*
	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
	 * unsigned int uc_tx_buf_sz =
	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
	 */
	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;

	wbm_bm_id = wlan_cfg_get_rbm_id_for_index(soc->wlan_cfg_ctx,
						  IPA_TCL_DATA_RING_IDX);

	hal_get_srng_params(soc->hal_soc, hal_srng_to_hal_ring_handle(wbm_srng),
			    &srng_params);
	num_entries = srng_params.num_entries;

	/* Leave some entries unfilled per the WBM2SW back-pressure WAR
	 * documented at the top of this file.
	 */
	max_alloc_count =
		num_entries - DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES;
	if (max_alloc_count <= 0) {
		dp_err("incorrect value for buffer count %u", max_alloc_count);
		return -EINVAL;
	}

	dp_info("requested %d buffers to be posted to wbm ring",
		max_alloc_count);

	/* Pool of nbuf pointers, one slot per ring entry */
	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned =
		qdf_mem_malloc(num_entries *
		sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned));
	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned) {
		dp_err("IPA WBM Ring Tx buf pool vaddr alloc fail");
		return -ENOMEM;
	}

	hal_srng_access_start_unlocked(soc->hal_soc,
				       hal_srng_to_hal_ring_handle(wbm_srng));

	/*
	 * Allocate Tx buffers as many as possible.
	 * Leave DP_IPA_WAR_WBM2SW_REL_RING_NO_BUF_ENTRIES empty
	 * Populate Tx buffers into WBM2IPA ring
	 * This initial buffer population will simulate H/W as source ring,
	 * and update HP
	 */
	for (tx_buffer_count = 0;
	     tx_buffer_count < max_alloc_count - 1; tx_buffer_count++) {
		nbuf = qdf_nbuf_frag_alloc(soc->osdev, alloc_size, 0,
					   256, FALSE);
		if (!nbuf)
			break;

		ring_entry = hal_srng_dst_get_next_hp(soc->hal_soc,
				hal_srng_to_hal_ring_handle(wbm_srng));
		if (!ring_entry) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				  "%s: Failed to get WBM ring entry",
				  __func__);
			qdf_nbuf_free(nbuf);
			break;
		}

		retval = qdf_nbuf_map_single(soc->osdev, nbuf,
					     QDF_DMA_BIDIRECTIONAL);
		if (qdf_unlikely(retval != QDF_STATUS_SUCCESS)) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
				  "%s: nbuf map failed", __func__);
			qdf_nbuf_free(nbuf);
			retval = -EFAULT;
			break;
		}
		buffer_paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
		qdf_mem_dp_tx_skb_cnt_inc();
		qdf_mem_dp_tx_skb_inc(qdf_nbuf_get_end_offset(nbuf));

		/*
		 * TODO - KIWI code can directly call the be handler
		 * instead of hal soc ops.
		 */
		hal_rxdma_buff_addr_info_set(soc->hal_soc, ring_entry,
					     buffer_paddr, 0, wbm_bm_id);

		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
			= (void *)nbuf;
	}

	hal_srng_access_end_unlocked(soc->hal_soc,
				     hal_srng_to_hal_ring_handle(wbm_srng));

	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;

	if (tx_buffer_count) {
		dp_info("IPA WDI TX buffer: %d allocated", tx_buffer_count);
	} else {
		/* Nothing posted at all: release the pointer pool.
		 * Note: on a partial failure (-EFAULT) the caller is
		 * expected to run dp_tx_ipa_uc_detach() for cleanup.
		 */
		dp_err("No IPA WDI TX buffer allocated!");
		qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
		retval = -ENOMEM;
	}

	return retval;
}
1582
1583 /**
1584 * dp_rx_ipa_uc_attach() - Allocate autonomy RX resources
1585 * @soc: data path instance
1586 * @pdev: core txrx pdev context
1587 *
1588 * This function will attach a DP RX instance into the main
1589 * device (SOC) context.
1590 *
1591 * Return: QDF_STATUS_SUCCESS: success
1592 * QDF_STATUS_E_RESOURCES: Error return
1593 */
/* RX side currently needs no per-attach allocation; always succeeds. */
static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
1598
dp_ipa_uc_attach(struct dp_soc * soc,struct dp_pdev * pdev)1599 int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
1600 {
1601 int error;
1602
1603 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1604 return QDF_STATUS_SUCCESS;
1605
1606 /* TX resource attach */
1607 error = dp_tx_ipa_uc_attach(soc, pdev);
1608 if (error) {
1609 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1610 "%s: DP IPA UC TX attach fail code %d",
1611 __func__, error);
1612 if (error == -EFAULT)
1613 dp_tx_ipa_uc_detach(soc, pdev);
1614 return error;
1615 }
1616
1617 /* Setup 2nd TX pipe */
1618 error = dp_ipa_tx_alt_pool_attach(soc);
1619 if (error) {
1620 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1621 "%s: DP IPA TX pool2 attach fail code %d",
1622 __func__, error);
1623 dp_tx_ipa_uc_detach(soc, pdev);
1624 return error;
1625 }
1626
1627 /* RX resource attach */
1628 error = dp_rx_ipa_uc_attach(soc, pdev);
1629 if (error) {
1630 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
1631 "%s: DP IPA UC RX attach fail code %d",
1632 __func__, error);
1633 dp_ipa_tx_alt_pool_detach(soc, pdev);
1634 dp_tx_ipa_uc_detach(soc, pdev);
1635 return error;
1636 }
1637
1638 return QDF_STATUS_SUCCESS; /* success */
1639 }
1640
1641 #ifdef IPA_WDI3_VLAN_SUPPORT
1642 /**
1643 * dp_ipa_rx_alt_ring_resource_setup() - setup IPA 2nd RX ring resources
1644 * @soc: data path SoC handle
1645 * @pdev: data path pdev handle
1646 *
1647 * Return: none
1648 */
static
void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	unsigned long addr_offset, dev_base_paddr;
	qdf_dma_addr_t hp_addr;

	if (!wlan_ipa_is_vlan_enabled())
		return;

	/* Shadow-register paddrs are computed from the device's mem_pa */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW3 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* entry_size is in 32-bit words, hence the << 2 to get bytes */
	soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/* Translate the TP register's MMIO offset into a physical addr */
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size);

	/* Alternate RX refill ring (third refill buffer ring) */
	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring3.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size);
}
1715 #else
/* No-op: IPA_WDI3_VLAN_SUPPORT disabled, no alternate RX ring setup. */
static inline
void dp_ipa_rx_alt_ring_resource_setup(struct dp_soc *soc, struct dp_pdev *pdev)
{ }
1719 #endif
/* Capture base/size/doorbell addresses of all rings shared with IPA
 * (TCL data, TX comp, REO dest, RX refill, plus optional alt rings) and
 * program the REO destination remap so IPA-bound RX lands on its ring.
 *
 * Return: 0 on success (QDF_STATUS_SUCCESS when IPA is disabled).
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev)
{
	struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
	struct hal_srng *hal_srng;
	struct hal_srng_params srng_params;
	qdf_dma_addr_t hp_addr;
	unsigned long addr_offset, dev_base_paddr;
	uint32_t ix0;
	uint8_t ix0_map[8];

	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL3 */
	hal_srng = (struct hal_srng *)
			soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	/* entry_size is in 32-bit words, hence << 2 for bytes */
	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/*
	 * For the register backed memory addresses, use the scn->mem_pa to
	 * calculate the physical address of the shadow registers
	 */
	dev_base_paddr =
		(unsigned long)
		((struct hif_softc *)(hal_soc->hif_handle))->mem_pa;
	addr_offset = (unsigned long)(hal_srng->u.src_ring.hp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TCL_DATA Ring addr_offset=%x, dev_base_paddr=%x, hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);

	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW2_RELEASE */
	hal_srng = (struct hal_srng *)
			soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	/* Shadow HP, used later to restore the doorbell on disconnect */
	soc->ipa_uc_tx_rsc.ipa_wbm_hp_shadow_paddr =
		hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				     hal_srng_to_hal_ring_handle(hal_srng));
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA TX COMP Ring addr_offset=%x, dev_base_paddr=%x, ipa_wbm_tp_paddr=%x paddr=%pK vaddr=0%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr),
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
		(void *)soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);

	dp_ipa_tx_alt_ring_resource_setup(soc);

	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
	hal_srng = (struct hal_srng *)
			soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);

	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	addr_offset = (unsigned long)(hal_srng->u.dst_ring.tp_addr) -
		      (unsigned long)(hal_soc->dev_base_addr);
	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr =
		(qdf_dma_addr_t)(addr_offset + dev_base_paddr);

	dp_info("IPA REO_DEST Ring addr_offset=%x, dev_base_paddr=%x, tp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)addr_offset,
		(unsigned int)dev_base_paddr,
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_reo_ring_size);

	/* RX refill ring dedicated to IPA */
	hal_srng = (struct hal_srng *)
			pdev->rx_refill_buf_ring2.hal_srng;
	hal_get_srng_params(hal_soc_to_hal_soc_handle(hal_soc),
			    hal_srng_to_hal_ring_handle(hal_srng),
			    &srng_params);
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
		srng_params.ring_base_paddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
		srng_params.ring_base_vaddr;
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
		(srng_params.num_entries * srng_params.entry_size) << 2;
	hp_addr = hal_srng_get_hp_addr(hal_soc_to_hal_soc_handle(hal_soc),
				       hal_srng_to_hal_ring_handle(hal_srng));
	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr =
		qdf_mem_paddr_from_dmaaddr(soc->osdev, hp_addr);

	dp_info("IPA REFILL_BUF Ring hp_paddr=%x paddr=%pK vaddr=%pK size= %u(%u bytes)",
		(unsigned int)(soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr),
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
		(void *)soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
		srng_params.num_entries,
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);

	/*
	 * Set DEST_RING_MAPPING_4 to SW2 as default value for
	 * DESTINATION_RING_CTRL_IX_0.
	 */
	ix0_map[0] = REO_REMAP_SW1;
	ix0_map[1] = REO_REMAP_SW1;
	ix0_map[2] = REO_REMAP_SW2;
	ix0_map[3] = REO_REMAP_SW3;
	ix0_map[4] = REO_REMAP_SW2;
	ix0_map[5] = REO_REMAP_RELEASE;
	ix0_map[6] = REO_REMAP_FW;
	ix0_map[7] = REO_REMAP_FW;

	/* opt wifi dp may override part of the default remap above */
	dp_ipa_opt_dp_ixo_remap(ix0_map);
	ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
				    ix0_map);

	hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL, NULL, NULL);

	dp_ipa_rx_alt_ring_resource_setup(soc, pdev);
	return 0;
}
1872
1873 #ifdef IPA_WDI3_VLAN_SUPPORT
1874 /**
1875 * dp_ipa_rx_alt_ring_get_resource() - get IPA 2nd RX ring resources
1876 * @pdev: data path pdev handle
1877 *
1878 * Return: Success if resourece is found
1879 */
dp_ipa_rx_alt_ring_get_resource(struct dp_pdev * pdev)1880 static QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev)
1881 {
1882 struct dp_soc *soc = pdev->soc;
1883 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1884
1885 if (!wlan_ipa_is_vlan_enabled())
1886 return QDF_STATUS_SUCCESS;
1887
1888 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_alt_rdy_ring,
1889 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_vaddr,
1890 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_base_paddr,
1891 soc->ipa_uc_rx_rsc_alt.ipa_reo_ring_size);
1892
1893 dp_ipa_get_shared_mem_info(
1894 soc->osdev, &ipa_res->rx_alt_refill_ring,
1895 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_vaddr,
1896 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_base_paddr,
1897 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_ring_size);
1898
1899 if (!qdf_mem_get_dma_addr(soc->osdev,
1900 &ipa_res->rx_alt_rdy_ring.mem_info) ||
1901 !qdf_mem_get_dma_addr(soc->osdev,
1902 &ipa_res->rx_alt_refill_ring.mem_info))
1903 return QDF_STATUS_E_FAILURE;
1904
1905 return QDF_STATUS_SUCCESS;
1906 }
1907 #else
/* IPA_WDI3_VLAN_SUPPORT disabled: no 2nd RX ring resources to fetch */
static inline QDF_STATUS dp_ipa_rx_alt_ring_get_resource(struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}
1912 #endif
1913
dp_ipa_get_resource(struct cdp_soc_t * soc_hdl,uint8_t pdev_id)1914 QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
1915 {
1916 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1917 struct dp_pdev *pdev =
1918 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
1919 struct dp_ipa_resources *ipa_res;
1920
1921 if (!pdev) {
1922 dp_err("Invalid instance");
1923 return QDF_STATUS_E_FAILURE;
1924 }
1925
1926 ipa_res = &pdev->ipa_resource;
1927 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
1928 return QDF_STATUS_SUCCESS;
1929
1930 ipa_res->tx_num_alloc_buffer =
1931 (uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
1932
1933 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_ring,
1934 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr,
1935 soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr,
1936 soc->ipa_uc_tx_rsc.ipa_tcl_ring_size);
1937
1938 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->tx_comp_ring,
1939 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr,
1940 soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr,
1941 soc->ipa_uc_tx_rsc.ipa_wbm_ring_size);
1942
1943 dp_ipa_get_shared_mem_info(soc->osdev, &ipa_res->rx_rdy_ring,
1944 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr,
1945 soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr,
1946 soc->ipa_uc_rx_rsc.ipa_reo_ring_size);
1947
1948 dp_ipa_get_shared_mem_info(
1949 soc->osdev, &ipa_res->rx_refill_ring,
1950 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr,
1951 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr,
1952 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size);
1953
1954 if (!qdf_mem_get_dma_addr(soc->osdev, &ipa_res->tx_ring.mem_info) ||
1955 !qdf_mem_get_dma_addr(soc->osdev,
1956 &ipa_res->tx_comp_ring.mem_info) ||
1957 !qdf_mem_get_dma_addr(soc->osdev, &ipa_res->rx_rdy_ring.mem_info) ||
1958 !qdf_mem_get_dma_addr(soc->osdev,
1959 &ipa_res->rx_refill_ring.mem_info))
1960 return QDF_STATUS_E_FAILURE;
1961
1962 if (dp_ipa_tx_alt_ring_get_resource(pdev))
1963 return QDF_STATUS_E_FAILURE;
1964
1965 if (dp_ipa_rx_alt_ring_get_resource(pdev))
1966 return QDF_STATUS_E_FAILURE;
1967
1968 return QDF_STATUS_SUCCESS;
1969 }
1970
#ifdef IPA_SET_RESET_TX_DB_PA
/* No-op here: the TX doorbell PA is presumably programmed through the
 * set/reset path when IPA_SET_RESET_TX_DB_PA is defined — confirm against
 * the set/reset implementation.
 */
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res)
#else
#define DP_IPA_SET_TX_DB_PADDR(soc, ipa_res) \
	dp_ipa_set_tx_doorbell_paddr(soc, ipa_res)
#endif
1977
1978 #ifdef IPA_WDI3_VLAN_SUPPORT
1979 /**
1980 * dp_ipa_map_rx_alt_ring_doorbell_paddr() - Map 2nd rx ring doorbell paddr
1981 * @pdev: data path pdev handle
1982 *
1983 * Return: none
1984 */
dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev * pdev)1985 static void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
1986 {
1987 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
1988 uint32_t rx_ready_doorbell_dmaaddr;
1989 struct dp_soc *soc = pdev->soc;
1990 struct hal_srng *reo_srng = (struct hal_srng *)
1991 soc->reo_dest_ring[IPA_ALT_REO_DEST_RING_IDX].hal_srng;
1992 int ret = 0;
1993
1994 if (!wlan_ipa_is_vlan_enabled())
1995 return;
1996
1997 if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
1998 ret = pld_smmu_map(soc->osdev->dev,
1999 ipa_res->rx_alt_ready_doorbell_paddr,
2000 &rx_ready_doorbell_dmaaddr,
2001 sizeof(uint32_t));
2002 ipa_res->rx_alt_ready_doorbell_paddr =
2003 rx_ready_doorbell_dmaaddr;
2004 qdf_assert_always(!ret);
2005 }
2006
2007 hal_srng_dst_set_hp_paddr_confirm(reo_srng,
2008 ipa_res->rx_alt_ready_doorbell_paddr);
2009 }
2010
2011 /**
2012 * dp_ipa_unmap_rx_alt_ring_doorbell_paddr() - Unmap 2nd rx ring doorbell paddr
2013 * @pdev: data path pdev handle
2014 *
2015 * Return: none
2016 */
dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev * pdev)2017 static void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
2018 {
2019 struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
2020 struct dp_soc *soc = pdev->soc;
2021 int ret = 0;
2022
2023 if (!wlan_ipa_is_vlan_enabled())
2024 return;
2025
2026 if (!qdf_mem_smmu_s1_enabled(soc->osdev))
2027 return;
2028
2029 ret = pld_smmu_unmap(soc->osdev->dev,
2030 ipa_res->rx_alt_ready_doorbell_paddr,
2031 sizeof(uint32_t));
2032 qdf_assert_always(!ret);
2033 }
2034 #else
/* IPA_WDI3_VLAN_SUPPORT disabled: 2nd RX ring doorbell map/unmap are no-ops */
static inline void dp_ipa_map_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
{ }

static inline void dp_ipa_unmap_rx_alt_ring_doorbell_paddr(struct dp_pdev *pdev)
{ }
2040 #endif
2041
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	/* REO2SW destination ring dedicated to the IPA RX path */
	struct hal_srng *reo_srng = (struct hal_srng *)
		soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;
	if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
		return QDF_STATUS_SUCCESS;

	/* Map the RX ready doorbell(s) — primary ring and, when VLAN
	 * offload is enabled, the 2nd (alt) RX ring as well.
	 */
	dp_ipa_map_ring_doorbell_paddr(pdev);
	dp_ipa_map_rx_alt_ring_doorbell_paddr(pdev);

	/* Compiles to nothing when IPA_SET_RESET_TX_DB_PA is defined */
	DP_IPA_SET_TX_DB_PADDR(soc, ipa_res);

	/*
	 * For RX, REO module on Napier/Hastings does reordering on incoming
	 * Ethernet packets and writes one or more descriptors to REO2IPA Rx
	 * ring. It then updates the ring's Write/Head ptr and rings a doorbell
	 * to IPA.
	 * Set the doorbell addr for the REO ring.
	 */
	hal_srng_dst_set_hp_paddr_confirm(reo_srng,
					  ipa_res->rx_ready_doorbell_paddr);
	return QDF_STATUS_SUCCESS;
}
2076
dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t * soc_hdl,uint8_t pdev_id)2077 QDF_STATUS dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t *soc_hdl,
2078 uint8_t pdev_id)
2079 {
2080 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2081 struct dp_pdev *pdev =
2082 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2083 struct dp_ipa_resources *ipa_res;
2084
2085 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2086 return QDF_STATUS_SUCCESS;
2087
2088 if (!pdev) {
2089 dp_err("Invalid instance");
2090 return QDF_STATUS_E_FAILURE;
2091 }
2092
2093 ipa_res = &pdev->ipa_resource;
2094 if (!ipa_res->is_db_ddr_mapped)
2095 iounmap(ipa_res->tx_comp_doorbell_vaddr);
2096
2097 return QDF_STATUS_SUCCESS;
2098 }
2099
dp_ipa_op_response(struct cdp_soc_t * soc_hdl,uint8_t pdev_id,uint8_t * op_msg)2100 QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2101 uint8_t *op_msg)
2102 {
2103 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2104 struct dp_pdev *pdev =
2105 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2106
2107 if (!pdev) {
2108 dp_err("Invalid instance");
2109 return QDF_STATUS_E_FAILURE;
2110 }
2111
2112 if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
2113 return QDF_STATUS_SUCCESS;
2114
2115 if (pdev->ipa_uc_op_cb) {
2116 pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
2117 } else {
2118 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2119 "%s: IPA callback function is not registered", __func__);
2120 qdf_mem_free(op_msg);
2121 return QDF_STATUS_E_FAILURE;
2122 }
2123
2124 return QDF_STATUS_SUCCESS;
2125 }
2126
dp_ipa_register_op_cb(struct cdp_soc_t * soc_hdl,uint8_t pdev_id,ipa_uc_op_cb_type op_cb,void * usr_ctxt)2127 QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2128 ipa_uc_op_cb_type op_cb,
2129 void *usr_ctxt)
2130 {
2131 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2132 struct dp_pdev *pdev =
2133 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2134
2135 if (!pdev) {
2136 dp_err("Invalid instance");
2137 return QDF_STATUS_E_FAILURE;
2138 }
2139
2140 if (!wlan_cfg_is_ipa_enabled(pdev->soc->wlan_cfg_ctx))
2141 return QDF_STATUS_SUCCESS;
2142
2143 pdev->ipa_uc_op_cb = op_cb;
2144 pdev->usr_ctxt = usr_ctxt;
2145
2146 return QDF_STATUS_SUCCESS;
2147 }
2148
void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *dp_pdev;

	dp_pdev = dp_get_pdev_from_soc_pdev_id_wifi3(dp_soc, pdev_id);
	if (!dp_pdev) {
		dp_err("Invalid instance");
		return;
	}

	/* Drop the uC op handler and its user context */
	dp_debug("Deregister OP handler callback");
	dp_pdev->ipa_uc_op_cb = NULL;
	dp_pdev->usr_ctxt = NULL;
}
2163
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
{
	/* TBD: stats retrieval not implemented; always reports success */
	return QDF_STATUS_SUCCESS;
}
2169
qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				     qdf_nbuf_t skb)
{
	qdf_nbuf_t leftover;

	/* dp_tx_send() consumes a NULL-terminated frame list; turn this
	 * single buffer into such a list.
	 */
	qdf_nbuf_set_next(skb, NULL);

	leftover = dp_tx_send(soc_hdl, vdev_id, skb);
	if (!leftover)
		return NULL;

	/* Non-NULL means the frame was not consumed; hand it back */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
		  "%s: Failed to tx", __func__);
	return leftover;
}
2186
2187 #ifdef QCA_IPA_LL_TX_FLOW_CONTROL
2188 /**
2189 * dp_ipa_is_target_ready() - check if target is ready or not
2190 * @soc: datapath soc handle
2191 *
2192 * Return: true if target is ready
2193 */
2194 static inline
dp_ipa_is_target_ready(struct dp_soc * soc)2195 bool dp_ipa_is_target_ready(struct dp_soc *soc)
2196 {
2197 if (hif_get_target_status(soc->hif_handle) == TARGET_STATUS_RESET)
2198 return false;
2199 else
2200 return true;
2201 }
2202
2203 /**
2204 * dp_ipa_update_txr_db_status() - Indicate transfer ring DB is SMMU mapped or not
2205 * @dev: Pointer to device
2206 * @txrx_smmu: WDI TX/RX configuration
2207 *
2208 * Return: None
2209 */
2210 static inline
dp_ipa_update_txr_db_status(struct device * dev,qdf_ipa_wdi_pipe_setup_info_smmu_t * txrx_smmu)2211 void dp_ipa_update_txr_db_status(struct device *dev,
2212 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu)
2213 {
2214 int pcie_slot = pld_get_pci_slot(dev);
2215
2216 if (pcie_slot)
2217 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) = false;
2218 else
2219 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) = true;
2220 }
2221
2222 /**
2223 * dp_ipa_update_evt_db_status() - Indicate evt ring DB is SMMU mapped or not
2224 * @dev: Pointer to device
2225 * @txrx_smmu: WDI TX/RX configuration
2226 *
2227 * Return: None
2228 */
2229 static inline
dp_ipa_update_evt_db_status(struct device * dev,qdf_ipa_wdi_pipe_setup_info_smmu_t * txrx_smmu)2230 void dp_ipa_update_evt_db_status(struct device *dev,
2231 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu)
2232 {
2233 int pcie_slot = pld_get_pci_slot(dev);
2234
2235 if (pcie_slot)
2236 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx_smmu) = false;
2237 else
2238 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx_smmu) = true;
2239 }
2240 #else
/* QCA_IPA_LL_TX_FLOW_CONTROL disabled: treat the target as always ready
 * and mark both doorbells as PCIe addresses.
 */
static inline
bool dp_ipa_is_target_ready(struct dp_soc *soc)
{
	return true;
}

static inline
void dp_ipa_update_txr_db_status(struct device *dev,
				 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu)
{
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) = true;
}

static inline
void dp_ipa_update_evt_db_status(struct device *dev,
				 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu)
{
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx_smmu) = true;
}
2260 #endif
2261
dp_ipa_enable_autonomy(struct cdp_soc_t * soc_hdl,uint8_t pdev_id)2262 QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2263 {
2264 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2265 struct dp_pdev *pdev =
2266 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2267 uint32_t ix0;
2268 uint32_t ix2;
2269 uint8_t ix_map[8];
2270
2271 if (!pdev) {
2272 dp_err("Invalid instance");
2273 return QDF_STATUS_E_FAILURE;
2274 }
2275
2276 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2277 return QDF_STATUS_SUCCESS;
2278
2279 if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
2280 return QDF_STATUS_E_AGAIN;
2281
2282 if (!dp_ipa_is_target_ready(soc))
2283 return QDF_STATUS_E_AGAIN;
2284
2285 /* Call HAL API to remap REO rings to REO2IPA ring */
2286 ix_map[0] = REO_REMAP_SW1;
2287 ix_map[1] = REO_REMAP_SW4;
2288 ix_map[2] = REO_REMAP_SW1;
2289 if (wlan_ipa_is_vlan_enabled())
2290 ix_map[3] = REO_REMAP_SW3;
2291 else
2292 ix_map[3] = REO_REMAP_SW4;
2293 ix_map[4] = REO_REMAP_SW4;
2294 ix_map[5] = REO_REMAP_RELEASE;
2295 ix_map[6] = REO_REMAP_FW;
2296 ix_map[7] = REO_REMAP_FW;
2297
2298 ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
2299 ix_map);
2300
2301 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2302 ix_map[0] = REO_REMAP_SW4;
2303 ix_map[1] = REO_REMAP_SW4;
2304 ix_map[2] = REO_REMAP_SW4;
2305 ix_map[3] = REO_REMAP_SW4;
2306 ix_map[4] = REO_REMAP_SW4;
2307 ix_map[5] = REO_REMAP_SW4;
2308 ix_map[6] = REO_REMAP_SW4;
2309 ix_map[7] = REO_REMAP_SW4;
2310
2311 ix2 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX2,
2312 ix_map);
2313
2314 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
2315 &ix2, &ix2);
2316 dp_ipa_reo_remap_history_add(ix0, ix2, ix2);
2317 } else {
2318 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
2319 NULL, NULL);
2320 dp_ipa_reo_remap_history_add(ix0, 0, 0);
2321 }
2322
2323 return QDF_STATUS_SUCCESS;
2324 }
2325
dp_ipa_disable_autonomy(struct cdp_soc_t * soc_hdl,uint8_t pdev_id)2326 QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
2327 {
2328 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2329 struct dp_pdev *pdev =
2330 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2331 uint8_t ix0_map[8];
2332 uint32_t ix0;
2333 uint32_t ix1;
2334 uint32_t ix2;
2335 uint32_t ix3;
2336
2337 if (!pdev) {
2338 dp_err("Invalid instance");
2339 return QDF_STATUS_E_FAILURE;
2340 }
2341
2342 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2343 return QDF_STATUS_SUCCESS;
2344
2345 if (!hif_is_target_ready(HIF_GET_SOFTC(soc->hif_handle)))
2346 return QDF_STATUS_E_AGAIN;
2347
2348 if (!dp_ipa_is_target_ready(soc))
2349 return QDF_STATUS_E_AGAIN;
2350
2351 ix0_map[0] = REO_REMAP_SW1;
2352 ix0_map[1] = REO_REMAP_SW1;
2353 ix0_map[2] = REO_REMAP_SW2;
2354 ix0_map[3] = REO_REMAP_SW3;
2355 ix0_map[4] = REO_REMAP_SW2;
2356 ix0_map[5] = REO_REMAP_RELEASE;
2357 ix0_map[6] = REO_REMAP_FW;
2358 ix0_map[7] = REO_REMAP_FW;
2359
2360 /* Call HAL API to remap REO rings to REO2IPA ring */
2361 ix0 = hal_gen_reo_remap_val(soc->hal_soc, HAL_REO_REMAP_REG_IX0,
2362 ix0_map);
2363
2364 if (wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx)) {
2365 dp_reo_remap_config(soc, &ix1, &ix2, &ix3);
2366
2367 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
2368 &ix2, &ix3);
2369 dp_ipa_reo_remap_history_add(ix0, ix2, ix3);
2370 } else {
2371 hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &ix0, NULL,
2372 NULL, NULL);
2373 dp_ipa_reo_remap_history_add(ix0, 0, 0);
2374 }
2375
2376 return QDF_STATUS_SUCCESS;
2377 }
2378
/* This should be configurable per H/W configuration enable status.
 * NOTE(review): the 2-byte pad presumably aligns the L3 header after the
 * Ethernet header — confirm against the RX packet layout.
 */
#define L3_HEADER_PADDING 2
2381
2382 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
2383 defined(CONFIG_IPA_WDI_UNIFIED_API)
2384
2385 #if !defined(QCA_LL_TX_FLOW_CONTROL_V2) && !defined(QCA_IPA_LL_TX_FLOW_CONTROL)
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	int idx;

	/* One MCC sys pipe per DP interface */
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) =
		DP_IPA_MAX_IFACE;

	for (idx = 0; idx < DP_IPA_MAX_IFACE; idx++)
		qdf_mem_copy(&QDF_IPA_WDI_CONN_IN_PARAMS_SYS_IN(pipe_in)[idx],
			     &sys_in[idx],
			     sizeof(qdf_ipa_sys_connect_params_t));
}
2398 #else
/* TX flow control builds: no MCC sys pipes are needed */
static inline void dp_setup_mcc_sys_pipes(
		qdf_ipa_sys_connect_params_t *sys_in,
		qdf_ipa_wdi_conn_in_params_t *pipe_in)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_NUM_SYS_PIPE_NEEDED(pipe_in) = 0;
}
2405 #endif
2406
/**
 * dp_ipa_wdi_tx_params() - Fill WDI pipe setup info for the TX path
 * @soc: data path soc handle
 * @ipa_res: exported IPA ring resources (TX comp + TX data rings)
 * @tx: WDI pipe setup info to populate
 * @over_gsi: true when IPA offload runs over GSI
 *
 * Non-SMMU variant: rings are described by plain DMA addresses.
 */
static void dp_ipa_wdi_tx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *tx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN2_CONS;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;

	/* Transfer ring: TX completion (WBM) ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);

	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(tx) = true;

	/* Event ring: TX data (TCL) ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);

	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(tx) = true;

	QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
		ipa_res->tx_num_alloc_buffer;

	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;

	dp_ipa_setup_tx_params_bank_id(soc, tx);

	/* Set Pmac ID, extract pmac_id from pdev_id 0 for TX ring */
	dp_ipa_setup_tx_params_pmac_id(soc, tx);
}
2451
/**
 * dp_ipa_wdi_rx_params() - Fill WDI pipe setup info for the RX path
 * @soc: data path soc handle
 * @ipa_res: exported IPA ring resources (REO dest + RX refill rings)
 * @rx: WDI pipe setup info to populate
 * @over_gsi: true when IPA offload runs over GSI
 *
 * Non-SMMU variant: rings are described by plain DMA addresses.
 */
static void dp_ipa_wdi_rx_params(struct dp_soc *soc,
				 struct dp_ipa_resources *ipa_res,
				 qdf_ipa_wdi_pipe_setup_info_t *rx,
				 bool over_gsi)
{
	if (over_gsi)
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
			IPA_CLIENT_WLAN2_PROD;
	else
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
			IPA_CLIENT_WLAN1_PROD;

	/* Transfer ring: REO destination ring feeding IPA */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	/* Event ring: RX refill buffer ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	/* Payload starts after the RX TLVs plus the L3 alignment pad */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
2491
/**
 * dp_ipa_wdi_tx_smmu_params() - Fill SMMU WDI pipe setup info for TX
 * @soc: data path soc handle
 * @ipa_res: exported IPA ring resources (TX comp + TX data rings)
 * @tx_smmu: SMMU pipe setup info to populate
 * @over_gsi: true when IPA offload runs over GSI
 * @hdl: registered IPA handle (selects the WLAN consumer client)
 *
 * SMMU variant: ring bases are passed as scatter-gather tables.
 */
static void
dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu,
			  bool over_gsi,
			  qdf_ipa_wdi_hdl_t hdl)
{
	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
				IPA_CLIENT_WLAN2_CONS;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
				IPA_CLIENT_WLAN4_CONS;
		else if (hdl == DP_IPA_HDL_THIRD)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
				IPA_CLIENT_WLAN1_CONS;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(tx_smmu) =
			IPA_CLIENT_WLAN1_CONS;
	}

	/* Transfer ring: TX completion (WBM) ring as an SG table */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(tx_smmu),
		     &ipa_res->tx_comp_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_comp_ring.mem_info);
	/* WBM Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
	dp_ipa_update_txr_db_status(soc->osdev->dev, tx_smmu);

	/* Event ring: TX data (TCL) ring as an SG table */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
		     &ipa_res->tx_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(tx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->tx_ring.mem_info);
	/* TCL Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
	dp_ipa_update_evt_db_status(soc->osdev->dev, tx_smmu);

	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
		ipa_res->tx_num_alloc_buffer;
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(tx_smmu) = 0;

	dp_ipa_setup_tx_smmu_params_bank_id(soc, tx_smmu);

	/* Set Pmac ID, extract pmac_id from first pdev for TX ring */
	dp_ipa_setup_tx_smmu_params_pmac_id(soc, tx_smmu);
}
2545
/**
 * dp_ipa_wdi_rx_smmu_params() - Fill SMMU WDI pipe setup info for RX
 * @soc: data path soc handle
 * @ipa_res: exported IPA ring resources (REO dest + RX refill rings)
 * @rx_smmu: SMMU pipe setup info to populate
 * @over_gsi: true when IPA offload runs over GSI
 * @hdl: registered IPA handle (selects the WLAN producer client)
 *
 * SMMU variant: ring bases are passed as scatter-gather tables.
 */
static void
dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
			  struct dp_ipa_resources *ipa_res,
			  qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
			  bool over_gsi,
			  qdf_ipa_wdi_hdl_t hdl)
{
	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN2_PROD;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN3_PROD;
		else if (hdl == DP_IPA_HDL_THIRD)
			QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
				IPA_CLIENT_WLAN1_PROD;
	} else {
		QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
			IPA_CLIENT_WLAN1_PROD;
	}

	/* Transfer ring: REO destination ring as an SG table */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
		     &ipa_res->rx_rdy_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_rdy_ring.mem_info);
	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
	dp_ipa_update_txr_db_status(soc->osdev->dev, rx_smmu);

	/* Event ring: RX refill buffer ring as an SG table */
	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
		     &ipa_res->rx_refill_ring.sgtable,
		     sizeof(sgtable_t));
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;

	/* Payload starts after the RX TLVs plus the L3 alignment pad */
	QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
2594
2595 #ifdef IPA_WDI3_VLAN_SUPPORT
2596 /**
2597 * dp_ipa_wdi_rx_alt_pipe_smmu_params() - Setup 2nd rx pipe smmu params
2598 * @soc: data path soc handle
2599 * @ipa_res: ipa resource pointer
2600 * @rx_smmu: smmu pipe info handle
2601 * @over_gsi: flag for IPA offload over gsi
2602 * @hdl: ipa registered handle
2603 *
2604 * Return: none
2605 */
2606 static void
dp_ipa_wdi_rx_alt_pipe_smmu_params(struct dp_soc * soc,struct dp_ipa_resources * ipa_res,qdf_ipa_wdi_pipe_setup_info_smmu_t * rx_smmu,bool over_gsi,qdf_ipa_wdi_hdl_t hdl)2607 dp_ipa_wdi_rx_alt_pipe_smmu_params(struct dp_soc *soc,
2608 struct dp_ipa_resources *ipa_res,
2609 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu,
2610 bool over_gsi,
2611 qdf_ipa_wdi_hdl_t hdl)
2612 {
2613 if (!wlan_ipa_is_vlan_enabled())
2614 return;
2615
2616 if (over_gsi) {
2617 if (hdl == DP_IPA_HDL_FIRST)
2618 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
2619 IPA_CLIENT_WLAN2_PROD1;
2620 else if (hdl == DP_IPA_HDL_SECOND)
2621 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
2622 IPA_CLIENT_WLAN3_PROD1;
2623 else if (hdl == DP_IPA_HDL_THIRD)
2624 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx_smmu) =
2625 IPA_CLIENT_WLAN1_PROD1;
2626 } else {
2627 QDF_IPA_WDI_SETUP_INFO_SMMU_CLIENT(rx_smmu) =
2628 IPA_CLIENT_WLAN1_PROD;
2629 }
2630
2631 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_BASE(rx_smmu),
2632 &ipa_res->rx_alt_rdy_ring.sgtable,
2633 sizeof(sgtable_t));
2634 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_SIZE(rx_smmu) =
2635 qdf_mem_get_dma_size(soc->osdev,
2636 &ipa_res->rx_alt_rdy_ring.mem_info);
2637 /* REO Tail Pointer Address */
2638 QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
2639 soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr;
2640 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;
2641
2642 qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
2643 &ipa_res->rx_alt_refill_ring.sgtable,
2644 sizeof(sgtable_t));
2645 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_SIZE(rx_smmu) =
2646 qdf_mem_get_dma_size(soc->osdev,
2647 &ipa_res->rx_alt_refill_ring.mem_info);
2648
2649 /* FW Head Pointer Address */
2650 QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(rx_smmu) =
2651 soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr;
2652 QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(rx_smmu) = false;
2653
2654 QDF_IPA_WDI_SETUP_INFO_SMMU_PKT_OFFSET(rx_smmu) =
2655 soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
2656 }
2657
2658 /**
2659 * dp_ipa_wdi_rx_alt_pipe_params() - Setup 2nd rx pipe params
2660 * @soc: data path soc handle
2661 * @ipa_res: ipa resource pointer
2662 * @rx: pipe info handle
2663 * @over_gsi: flag for IPA offload over gsi
2664 * @hdl: ipa registered handle
2665 *
2666 * Return: none
2667 */
static void dp_ipa_wdi_rx_alt_pipe_params(struct dp_soc *soc,
					  struct dp_ipa_resources *ipa_res,
					  qdf_ipa_wdi_pipe_setup_info_t *rx,
					  bool over_gsi,
					  qdf_ipa_wdi_hdl_t hdl)
{
	/* Only meaningful when the VLAN (2nd) RX pipe is in use */
	if (!wlan_ipa_is_vlan_enabled())
		return;

	if (over_gsi) {
		if (hdl == DP_IPA_HDL_FIRST)
			QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
				IPA_CLIENT_WLAN2_PROD1;
		else if (hdl == DP_IPA_HDL_SECOND)
			QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
				IPA_CLIENT_WLAN3_PROD1;
		else if (hdl == DP_IPA_HDL_THIRD)
			QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
				IPA_CLIENT_WLAN1_PROD1;
	} else {
		QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) =
			IPA_CLIENT_WLAN1_PROD;
	}

	/* Transfer ring: alt-path REO destination ring */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_alt_rdy_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_alt_rdy_ring.mem_info);

	/* REO Tail Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc_alt.ipa_reo_tp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_TXR_RN_DB_PCIE_ADDR(rx) = true;

	/* Event ring: alt-path RX refill buffer ring */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
		qdf_mem_get_dma_addr(soc->osdev,
				     &ipa_res->rx_alt_refill_ring.mem_info);
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
		qdf_mem_get_dma_size(soc->osdev,
				     &ipa_res->rx_alt_refill_ring.mem_info);

	/* FW Head Pointer Address */
	QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
		soc->ipa_uc_rx_rsc_alt.ipa_rx_refill_buf_hp_paddr;
	QDF_IPA_WDI_SETUP_INFO_IS_EVT_RN_DB_PCIE_ADDR(rx) = false;

	/* Payload starts after the RX TLVs plus the L3 alignment pad */
	QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) =
		soc->rx_pkt_tlv_size + L3_HEADER_PADDING;
}
2719
2720 /**
2721 * dp_ipa_setup_rx_alt_pipe() - Setup 2nd rx pipe for IPA offload
2722 * @soc: data path soc handle
2723 * @res: ipa resource pointer
2724 * @in: pipe in handle
2725 * @over_gsi: flag for IPA offload over gsi
2726 * @hdl: ipa registered handle
2727 *
2728 * Return: none
2729 */
static void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc,
				     struct dp_ipa_resources *res,
				     qdf_ipa_wdi_conn_in_params_t *in,
				     bool over_gsi,
				     qdf_ipa_wdi_hdl_t hdl)
{
	qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL;
	qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
	qdf_ipa_ep_cfg_t *rx_cfg;

	/* The 2nd (VLAN) rx pipe is only set up when VLAN support is on */
	if (!wlan_ipa_is_vlan_enabled())
		return;

	QDF_IPA_WDI_CONN_IN_PARAMS_IS_RX1_USED(in) = true;
	if (qdf_mem_smmu_s1_enabled(soc->osdev)) {
		rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT_SMMU(in);
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
		dp_ipa_wdi_rx_alt_pipe_smmu_params(soc, res, rx_smmu,
						   over_gsi, hdl);
	} else {
		rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_ALT(in);
		/*
		 * Fix: use the non-SMMU EP-config accessor on the non-SMMU
		 * setup info (the SMMU accessor was used here before),
		 * matching the rx pipe handling in dp_ipa_setup().
		 */
		rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
		dp_ipa_wdi_rx_alt_pipe_params(soc, res, rx, over_gsi, hdl);
	}

	QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
	/* Update with wds len(96) + 4 if wds support is enabled */
	if (ucfg_ipa_is_wds_enabled())
		QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) =
			DP_IPA_UC_WLAN_RX_HDR_LEN_AST_VLAN;
	else
		/*
		 * NOTE(review): the TX vlan header length macro is used for
		 * this RX EP config -- presumably the same 802.1Q ethernet
		 * header length; confirm against the SMMU path.
		 */
		QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) =
			DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
	QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
	QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
	QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
}
2769
2770 /**
2771 * dp_ipa_set_rx_alt_pipe_db() - Setup 2nd rx pipe doorbell
2772 * @res: ipa resource pointer
2773 * @out: pipe out handle
2774 *
2775 * Return: none
2776 */
static void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res,
				      qdf_ipa_wdi_conn_out_params_t *out)
{
	/* No alt (VLAN) rx pipe when VLAN support is disabled */
	if (!wlan_ipa_is_vlan_enabled())
		return;

	/* Cache the uC doorbell PA IPA reported for the alt rx pipe */
	res->rx_alt_ready_doorbell_paddr =
		QDF_IPA_WDI_CONN_OUT_PARAMS_RX_ALT_UC_DB_PA(out);
	dp_debug("Setting DB 0x%x for RX alt pipe",
		 res->rx_alt_ready_doorbell_paddr);
}
2788 #else
/* Stub when the 2nd (VLAN) rx pipe feature is compiled out */
static inline
void dp_ipa_setup_rx_alt_pipe(struct dp_soc *soc,
			      struct dp_ipa_resources *res,
			      qdf_ipa_wdi_conn_in_params_t *in,
			      bool over_gsi,
			      qdf_ipa_wdi_hdl_t hdl)
{ }
2796
/* Stub when the 2nd (VLAN) rx pipe feature is compiled out */
static inline
void dp_ipa_set_rx_alt_pipe_db(struct dp_ipa_resources *res,
			       qdf_ipa_wdi_conn_out_params_t *out)
{ }
2801 #endif
2802
dp_ipa_setup(struct cdp_soc_t * soc_hdl,uint8_t pdev_id,void * ipa_i2w_cb,void * ipa_w2i_cb,void * ipa_wdi_meter_notifier_cb,uint32_t ipa_desc_size,void * ipa_priv,bool is_rm_enabled,uint32_t * tx_pipe_handle,uint32_t * rx_pipe_handle,bool is_smmu_enabled,qdf_ipa_sys_connect_params_t * sys_in,bool over_gsi,qdf_ipa_wdi_hdl_t hdl,qdf_ipa_wdi_hdl_t id,void * ipa_ast_notify_cb)2803 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
2804 void *ipa_i2w_cb, void *ipa_w2i_cb,
2805 void *ipa_wdi_meter_notifier_cb,
2806 uint32_t ipa_desc_size, void *ipa_priv,
2807 bool is_rm_enabled, uint32_t *tx_pipe_handle,
2808 uint32_t *rx_pipe_handle, bool is_smmu_enabled,
2809 qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi,
2810 qdf_ipa_wdi_hdl_t hdl, qdf_ipa_wdi_hdl_t id,
2811 void *ipa_ast_notify_cb)
2812 {
2813 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
2814 struct dp_pdev *pdev =
2815 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
2816 struct dp_ipa_resources *ipa_res;
2817 qdf_ipa_ep_cfg_t *tx_cfg;
2818 qdf_ipa_ep_cfg_t *rx_cfg;
2819 qdf_ipa_wdi_pipe_setup_info_t *tx = NULL;
2820 qdf_ipa_wdi_pipe_setup_info_t *rx = NULL;
2821 qdf_ipa_wdi_pipe_setup_info_smmu_t *tx_smmu;
2822 qdf_ipa_wdi_pipe_setup_info_smmu_t *rx_smmu = NULL;
2823 qdf_ipa_wdi_conn_in_params_t *pipe_in = NULL;
2824 qdf_ipa_wdi_conn_out_params_t pipe_out;
2825 int ret;
2826
2827 if (!pdev) {
2828 dp_err("Invalid instance");
2829 return QDF_STATUS_E_FAILURE;
2830 }
2831
2832 ipa_res = &pdev->ipa_resource;
2833 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
2834 return QDF_STATUS_SUCCESS;
2835
2836 pipe_in = qdf_mem_malloc(sizeof(*pipe_in));
2837 if (!pipe_in)
2838 return QDF_STATUS_E_NOMEM;
2839
2840 qdf_mem_zero(&pipe_out, sizeof(pipe_out));
2841
2842 if (is_smmu_enabled)
2843 QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = true;
2844 else
2845 QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in) = false;
2846
2847 dp_setup_mcc_sys_pipes(sys_in, pipe_in);
2848
2849 /* TX PIPE */
2850 if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
2851 tx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_TX_SMMU(pipe_in);
2852 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(tx_smmu);
2853 } else {
2854 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(pipe_in);
2855 tx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(tx);
2856 }
2857
2858 QDF_IPA_EP_CFG_NAT_EN(tx_cfg) = IPA_BYPASS_NAT;
2859 QDF_IPA_EP_CFG_HDR_LEN(tx_cfg) = DP_IPA_UC_WLAN_TX_HDR_LEN;
2860 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(tx_cfg) = 0;
2861 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(tx_cfg) = 0;
2862 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(tx_cfg) = 0;
2863 QDF_IPA_EP_CFG_MODE(tx_cfg) = IPA_BASIC;
2864 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(tx_cfg) = true;
2865
2866 /*
2867 * Transfer Ring: WBM Ring
2868 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
2869 * Event Ring: TCL ring
2870 * Event Ring Doorbell PA: TCL Head Pointer Address
2871 */
2872 if (is_smmu_enabled)
2873 dp_ipa_wdi_tx_smmu_params(soc, ipa_res, tx_smmu, over_gsi, id);
2874 else
2875 dp_ipa_wdi_tx_params(soc, ipa_res, tx, over_gsi);
2876
2877 dp_ipa_setup_tx_alt_pipe(soc, ipa_res, pipe_in);
2878
2879 /* RX PIPE */
2880 if (QDF_IPA_WDI_CONN_IN_PARAMS_SMMU_ENABLED(pipe_in)) {
2881 rx_smmu = &QDF_IPA_WDI_CONN_IN_PARAMS_RX_SMMU(pipe_in);
2882 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_SMMU_EP_CFG(rx_smmu);
2883 } else {
2884 rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(pipe_in);
2885 rx_cfg = &QDF_IPA_WDI_SETUP_INFO_EP_CFG(rx);
2886 }
2887
2888 QDF_IPA_EP_CFG_NAT_EN(rx_cfg) = IPA_BYPASS_NAT;
2889 if (ucfg_ipa_is_wds_enabled())
2890 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN_AST;
2891 else
2892 QDF_IPA_EP_CFG_HDR_LEN(rx_cfg) = DP_IPA_UC_WLAN_RX_HDR_LEN;
2893
2894 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE_VALID(rx_cfg) = 1;
2895 QDF_IPA_EP_CFG_HDR_OFST_PKT_SIZE(rx_cfg) = 0;
2896 QDF_IPA_EP_CFG_HDR_ADDITIONAL_CONST_LEN(rx_cfg) = 0;
2897 QDF_IPA_EP_CFG_HDR_OFST_METADATA_VALID(rx_cfg) = 0;
2898 QDF_IPA_EP_CFG_HDR_METADATA_REG_VALID(rx_cfg) = 1;
2899 QDF_IPA_EP_CFG_MODE(rx_cfg) = IPA_BASIC;
2900 QDF_IPA_EP_CFG_HDR_LITTLE_ENDIAN(rx_cfg) = true;
2901
2902 /*
2903 * Transfer Ring: REO Ring
2904 * Transfer Ring Doorbell PA: REO Tail Pointer Address
2905 * Event Ring: FW ring
2906 * Event Ring Doorbell PA: FW Head Pointer Address
2907 */
2908 if (is_smmu_enabled)
2909 dp_ipa_wdi_rx_smmu_params(soc, ipa_res, rx_smmu, over_gsi, id);
2910 else
2911 dp_ipa_wdi_rx_params(soc, ipa_res, rx, over_gsi);
2912
2913 /* setup 2nd rx pipe */
2914 dp_ipa_setup_rx_alt_pipe(soc, ipa_res, pipe_in, over_gsi, id);
2915
2916 QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(pipe_in) = ipa_w2i_cb;
2917 QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(pipe_in) = ipa_priv;
2918 QDF_IPA_WDI_CONN_IN_PARAMS_HANDLE(pipe_in) = hdl;
2919 dp_ipa_ast_notify_cb(pipe_in, ipa_ast_notify_cb);
2920
2921 /* Connect WDI IPA PIPEs */
2922 ret = qdf_ipa_wdi_conn_pipes(pipe_in, &pipe_out);
2923
2924 if (ret) {
2925 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
2926 "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
2927 __func__, ret);
2928 qdf_mem_free(pipe_in);
2929 return QDF_STATUS_E_FAILURE;
2930 }
2931
2932 /* IPA uC Doorbell registers */
2933 dp_info("Tx DB PA=0x%x, Rx DB PA=0x%x",
2934 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
2935 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
2936
2937 dp_ipa_set_pipe_db(ipa_res, &pipe_out);
2938 dp_ipa_set_rx_alt_pipe_db(ipa_res, &pipe_out);
2939
2940 ipa_res->is_db_ddr_mapped =
2941 QDF_IPA_WDI_CONN_OUT_PARAMS_IS_DB_DDR_MAPPED(&pipe_out);
2942
2943 soc->ipa_first_tx_db_access = true;
2944 qdf_mem_free(pipe_in);
2945
2946 qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
2947 soc->ipa_rx_buf_map_lock_initialized = true;
2948
2949 return QDF_STATUS_SUCCESS;
2950 }
2951
2952 #ifdef IPA_WDI3_VLAN_SUPPORT
2953 /**
2954 * dp_ipa_set_rx1_used() - Set rx1 used flag for 2nd rx offload ring
2955 * @in: pipe in handle
2956 *
2957 * Return: none
2958 */
static inline
void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in)
{
	/* Tell IPA the second (VLAN) rx offload ring is in use */
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_RX1_USED(in) = true;
}
2964
2965 /**
2966 * dp_ipa_set_v4_vlan_hdr() - Set v4 vlan hdr
2967 * @in: pipe in handle
2968 * @hdr: pointer to hdr
2969 *
2970 * Return: none
2971 */
static inline
void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{
	/* Copy @hdr into the IPA_IP_v4_VLAN header-info slot of @in */
	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v4_VLAN]),
		     hdr, sizeof(qdf_ipa_wdi_hdr_info_t));
}
2979
2980 /**
2981 * dp_ipa_set_v6_vlan_hdr() - Set v6 vlan hdr
2982 * @in: pipe in handle
2983 * @hdr: pointer to hdr
2984 *
2985 * Return: none
2986 */
static inline
void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{
	/* Copy @hdr into the IPA_IP_v6_VLAN header-info slot of @in */
	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(in)[IPA_IP_v6_VLAN]),
		     hdr, sizeof(qdf_ipa_wdi_hdr_info_t));
}
2994 #else
/* Stub when VLAN support is compiled out: rx1 is never marked in use */
static inline
void dp_ipa_set_rx1_used(qdf_ipa_wdi_reg_intf_in_params_t *in)
{ }
2998
/* Stub when VLAN support is compiled out: no v4 vlan header to install */
static inline
void dp_ipa_set_v4_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{ }
3003
/* Stub when VLAN support is compiled out: no v6 vlan header to install */
static inline
void dp_ipa_set_v6_vlan_hdr(qdf_ipa_wdi_reg_intf_in_params_t *in,
			    qdf_ipa_wdi_hdr_info_t *hdr)
{ }
3008 #endif
3009
3010 #ifdef IPA_WDS_EASYMESH_FEATURE
3011 /**
3012 * dp_ipa_set_wdi_hdr_type() - Set wdi hdr type for IPA
3013 * @hdr_info: Header info
3014 *
3015 * Return: None
3016 */
3017 static inline void
dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t * hdr_info)3018 dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
3019 {
3020 if (ucfg_ipa_is_wds_enabled())
3021 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
3022 IPA_HDR_L2_ETHERNET_II_AST;
3023 else
3024 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
3025 IPA_HDR_L2_ETHERNET_II;
3026 }
3027
3028 /**
3029 * dp_ipa_setup_meta_data_mask() - Pass meta data mask to IPA
3030 * @in: ipa in params
3031 *
3032 * Pass meta data mask to IPA.
3033 *
3034 * Return: none
3035 */
static void dp_ipa_setup_meta_data_mask(qdf_ipa_wdi_reg_intf_in_params_t *in)
{
	/* Pick the AST metadata mask when WDS is enabled, plain otherwise */
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) =
		ucfg_ipa_is_wds_enabled() ? WLAN_IPA_AST_META_DATA_MASK :
					    WLAN_IPA_META_DATA_MASK;
}
3043 #else
/* WDS easymesh compiled out: always the plain Ethernet II header type */
static inline void
dp_ipa_set_wdi_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{
	QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) = IPA_HDR_L2_ETHERNET_II;
}
3049
/* WDS easymesh compiled out: always the plain metadata mask */
static void dp_ipa_setup_meta_data_mask(qdf_ipa_wdi_reg_intf_in_params_t *in)
{
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(in) = WLAN_IPA_META_DATA_MASK;
}
3054 #endif
3055
3056 #ifdef IPA_WDI3_VLAN_SUPPORT
3057 /**
3058 * dp_ipa_set_wdi_vlan_hdr_type() - Set wdi vlan hdr type for IPA
3059 * @hdr_info: Header info
3060 *
3061 * Return: None
3062 */
3063 static inline void
dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t * hdr_info)3064 dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
3065 {
3066 if (ucfg_ipa_is_wds_enabled())
3067 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
3068 IPA_HDR_L2_802_1Q_AST;
3069 else
3070 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(hdr_info) =
3071 IPA_HDR_L2_802_1Q;
3072 }
3073 #else
/* VLAN support compiled out: the vlan header type is never programmed */
static inline void
dp_ipa_set_wdi_vlan_hdr_type(qdf_ipa_wdi_hdr_info_t *hdr_info)
{ }
3077 #endif
3078
/**
 * dp_ipa_setup_iface() - Register a wlan interface and its partial tx
 *	headers with IPA
 * @ifname: netdev name
 * @mac_addr: interface mac address, copied as the tx header source address
 * @prod_client: IPA producer client type (unused in this path)
 * @cons_client: IPA consumer client type, programmed as the alt dst pipe
 * @session_id: vdev session id
 * @is_ipv6_enabled: also register the IPv6 (and IPv6 vlan) headers
 * @hdl: IPA registered handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
			      qdf_ipa_client_type_t prod_client,
			      qdf_ipa_client_type_t cons_client,
			      uint8_t session_id, bool is_ipv6_enabled,
			      qdf_ipa_wdi_hdl_t hdl)
{
	qdf_ipa_wdi_reg_intf_in_params_t in;
	qdf_ipa_wdi_hdr_info_t hdr_info;
	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
	struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
	struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr;
	struct dp_ipa_uc_tx_vlan_hdr uc_tx_vlan_hdr_v6;
	int ret = -EINVAL;

	qdf_mem_zero(&in, sizeof(qdf_ipa_wdi_reg_intf_in_params_t));

	/* Need to reset the values to 0 as all the fields are not
	 * updated in the Header, Unused fields will be set to 0.
	 */
	qdf_mem_zero(&uc_tx_vlan_hdr, sizeof(struct dp_ipa_uc_tx_vlan_hdr));
	qdf_mem_zero(&uc_tx_vlan_hdr_v6, sizeof(struct dp_ipa_uc_tx_vlan_hdr));

	dp_debug("Add Partial hdr: %s, "QDF_MAC_ADDR_FMT, ifname,
		 QDF_MAC_ADDR_REF(mac_addr));
	qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);

	/* IPV4 header */
	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);

	QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
	QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
	dp_ipa_set_wdi_hdr_type(&hdr_info);

	QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
		DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;

	QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
	qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
		     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_ALT_DST_PIPE(&in) = cons_client;
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
	dp_ipa_setup_meta_data_mask(&in);
	QDF_IPA_WDI_REG_INTF_IN_PARAMS_HANDLE(&in) = hdl;
	dp_ipa_setup_iface_session_id(&in, session_id);
	dp_debug("registering for session_id: %u", session_id);

	/* IPV6 header: reuse the v4 template with the IPv6 ethertype */
	if (is_ipv6_enabled) {
		qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
			     DP_IPA_UC_WLAN_TX_HDR_LEN);
		uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
		qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
			     &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
	}

	if (wlan_ipa_is_vlan_enabled()) {
		/* Add vlan specific headers if vlan support is enabled */
		qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
		dp_ipa_set_rx1_used(&in);
		qdf_ether_addr_copy(uc_tx_vlan_hdr.eth.h_source, mac_addr);
		/* IPV4 Vlan header */
		uc_tx_vlan_hdr.eth.h_vlan_proto = qdf_htons(ETH_P_8021Q);
		uc_tx_vlan_hdr.eth.h_vlan_encapsulated_proto = qdf_htons(ETH_P_IP);

		QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) =
			(uint8_t *)&uc_tx_vlan_hdr;
		QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) =
			DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN;
		dp_ipa_set_wdi_vlan_hdr_type(&hdr_info);

		QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
			DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;

		dp_ipa_set_v4_vlan_hdr(&in, &hdr_info);

		/* IPV6 Vlan header */
		if (is_ipv6_enabled) {
			qdf_mem_copy(&uc_tx_vlan_hdr_v6, &uc_tx_vlan_hdr,
				     DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN);
			uc_tx_vlan_hdr_v6.eth.h_vlan_proto =
				qdf_htons(ETH_P_8021Q);
			uc_tx_vlan_hdr_v6.eth.h_vlan_encapsulated_proto =
				qdf_htons(ETH_P_IPV6);
			QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) =
				(uint8_t *)&uc_tx_vlan_hdr_v6;
			dp_ipa_set_v6_vlan_hdr(&in, &hdr_info);
		}
	}

	ret = qdf_ipa_wdi_reg_intf(&in);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
			  __func__, ret);
		return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
3180
3181 #else /* !CONFIG_IPA_WDI_UNIFIED_API */
dp_ipa_setup(struct cdp_soc_t * soc_hdl,uint8_t pdev_id,void * ipa_i2w_cb,void * ipa_w2i_cb,void * ipa_wdi_meter_notifier_cb,uint32_t ipa_desc_size,void * ipa_priv,bool is_rm_enabled,uint32_t * tx_pipe_handle,uint32_t * rx_pipe_handle)3182 QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3183 void *ipa_i2w_cb, void *ipa_w2i_cb,
3184 void *ipa_wdi_meter_notifier_cb,
3185 uint32_t ipa_desc_size, void *ipa_priv,
3186 bool is_rm_enabled, uint32_t *tx_pipe_handle,
3187 uint32_t *rx_pipe_handle)
3188 {
3189 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3190 struct dp_pdev *pdev =
3191 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3192 struct dp_ipa_resources *ipa_res;
3193 qdf_ipa_wdi_pipe_setup_info_t *tx;
3194 qdf_ipa_wdi_pipe_setup_info_t *rx;
3195 qdf_ipa_wdi_conn_in_params_t pipe_in;
3196 qdf_ipa_wdi_conn_out_params_t pipe_out;
3197 struct tcl_data_cmd *tcl_desc_ptr;
3198 uint8_t *desc_addr;
3199 uint32_t desc_size;
3200 int ret;
3201
3202 if (!pdev) {
3203 dp_err("Invalid instance");
3204 return QDF_STATUS_E_FAILURE;
3205 }
3206
3207 ipa_res = &pdev->ipa_resource;
3208 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
3209 return QDF_STATUS_SUCCESS;
3210
3211 qdf_mem_zero(&tx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
3212 qdf_mem_zero(&rx, sizeof(qdf_ipa_wdi_pipe_setup_info_t));
3213 qdf_mem_zero(&pipe_in, sizeof(pipe_in));
3214 qdf_mem_zero(&pipe_out, sizeof(pipe_out));
3215
3216 /* TX PIPE */
3217 /*
3218 * Transfer Ring: WBM Ring
3219 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
3220 * Event Ring: TCL ring
3221 * Event Ring Doorbell PA: TCL Head Pointer Address
3222 */
3223 tx = &QDF_IPA_WDI_CONN_IN_PARAMS_TX(&pipe_in);
3224 QDF_IPA_WDI_SETUP_INFO_NAT_EN(tx) = IPA_BYPASS_NAT;
3225 QDF_IPA_WDI_SETUP_INFO_HDR_LEN(tx) = DP_IPA_UC_WLAN_TX_HDR_LEN;
3226 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(tx) = 0;
3227 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(tx) = 0;
3228 QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(tx) = 0;
3229 QDF_IPA_WDI_SETUP_INFO_MODE(tx) = IPA_BASIC;
3230 QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(tx) = true;
3231 QDF_IPA_WDI_SETUP_INFO_CLIENT(tx) = IPA_CLIENT_WLAN1_CONS;
3232 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx) =
3233 ipa_res->tx_comp_ring_base_paddr;
3234 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx) =
3235 ipa_res->tx_comp_ring_size;
3236 /* WBM Tail Pointer Address */
3237 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx) =
3238 soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
3239 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx) =
3240 ipa_res->tx_ring_base_paddr;
3241 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx) = ipa_res->tx_ring_size;
3242 /* TCL Head Pointer Address */
3243 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx) =
3244 soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
3245 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx) =
3246 ipa_res->tx_num_alloc_buffer;
3247 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(tx) = 0;
3248
3249 /* Preprogram TCL descriptor */
3250 desc_addr =
3251 (uint8_t *)QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx);
3252 desc_size = sizeof(struct tcl_data_cmd);
3253 HAL_TX_DESC_SET_TLV_HDR(desc_addr, HAL_TX_TCL_DATA_TAG, desc_size);
3254 tcl_desc_ptr = (struct tcl_data_cmd *)
3255 (QDF_IPA_WDI_SETUP_INFO_DESC_FORMAT_TEMPLATE(tx) + 1);
3256 tcl_desc_ptr->buf_addr_info.return_buffer_manager =
3257 HAL_RX_BUF_RBM_SW2_BM;
3258 tcl_desc_ptr->addrx_en = 1; /* Address X search enable in ASE */
3259 tcl_desc_ptr->encap_type = HAL_TX_ENCAP_TYPE_ETHERNET;
3260 tcl_desc_ptr->packet_offset = 2; /* padding for alignment */
3261
3262 /* RX PIPE */
3263 /*
3264 * Transfer Ring: REO Ring
3265 * Transfer Ring Doorbell PA: REO Tail Pointer Address
3266 * Event Ring: FW ring
3267 * Event Ring Doorbell PA: FW Head Pointer Address
3268 */
3269 rx = &QDF_IPA_WDI_CONN_IN_PARAMS_RX(&pipe_in);
3270 QDF_IPA_WDI_SETUP_INFO_NAT_EN(rx) = IPA_BYPASS_NAT;
3271 QDF_IPA_WDI_SETUP_INFO_HDR_LEN(rx) = DP_IPA_UC_WLAN_RX_HDR_LEN;
3272 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE_VALID(rx) = 0;
3273 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_PKT_SIZE(rx) = 0;
3274 QDF_IPA_WDI_SETUP_INFO_HDR_ADDITIONAL_CONST_LEN(rx) = 0;
3275 QDF_IPA_WDI_SETUP_INFO_HDR_OFST_METADATA_VALID(rx) = 0;
3276 QDF_IPA_WDI_SETUP_INFO_HDR_METADATA_REG_VALID(rx) = 1;
3277 QDF_IPA_WDI_SETUP_INFO_MODE(rx) = IPA_BASIC;
3278 QDF_IPA_WDI_SETUP_INFO_HDR_LITTLE_ENDIAN(rx) = true;
3279 QDF_IPA_WDI_SETUP_INFO_CLIENT(rx) = IPA_CLIENT_WLAN1_PROD;
3280 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx) =
3281 ipa_res->rx_rdy_ring_base_paddr;
3282 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx) =
3283 ipa_res->rx_rdy_ring_size;
3284 /* REO Tail Pointer Address */
3285 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx) =
3286 soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
3287 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx) =
3288 ipa_res->rx_refill_ring_base_paddr;
3289 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx) =
3290 ipa_res->rx_refill_ring_size;
3291 /* FW Head Pointer Address */
3292 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx) =
3293 soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
3294 QDF_IPA_WDI_SETUP_INFO_PKT_OFFSET(rx) = soc->rx_pkt_tlv_size +
3295 L3_HEADER_PADDING;
3296 QDF_IPA_WDI_CONN_IN_PARAMS_NOTIFY(&pipe_in) = ipa_w2i_cb;
3297 QDF_IPA_WDI_CONN_IN_PARAMS_PRIV(&pipe_in) = ipa_priv;
3298
3299 /* Connect WDI IPA PIPE */
3300 ret = qdf_ipa_wdi_conn_pipes(&pipe_in, &pipe_out);
3301 if (ret) {
3302 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3303 "%s: ipa_wdi_conn_pipes: IPA pipe setup failed: ret=%d",
3304 __func__, ret);
3305 return QDF_STATUS_E_FAILURE;
3306 }
3307
3308 /* IPA uC Doorbell registers */
3309 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3310 "%s: Tx DB PA=0x%x, Rx DB PA=0x%x",
3311 __func__,
3312 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out),
3313 (unsigned int)QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out));
3314
3315 ipa_res->tx_comp_doorbell_paddr =
3316 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_PA(&pipe_out);
3317 ipa_res->tx_comp_doorbell_vaddr =
3318 QDF_IPA_WDI_CONN_OUT_PARAMS_TX_UC_DB_VA(&pipe_out);
3319 ipa_res->rx_ready_doorbell_paddr =
3320 QDF_IPA_WDI_CONN_OUT_PARAMS_RX_UC_DB_PA(&pipe_out);
3321
3322 soc->ipa_first_tx_db_access = true;
3323
3324 qdf_spinlock_create(&soc->ipa_rx_buf_map_lock);
3325 soc->ipa_rx_buf_map_lock_initialized = true;
3326
3327 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3328 "%s: Tx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
3329 __func__,
3330 "transfer_ring_base_pa",
3331 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(tx),
3332 "transfer_ring_size",
3333 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(tx),
3334 "transfer_ring_doorbell_pa",
3335 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(tx),
3336 "event_ring_base_pa",
3337 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(tx),
3338 "event_ring_size",
3339 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(tx),
3340 "event_ring_doorbell_pa",
3341 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(tx),
3342 "num_pkt_buffers",
3343 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(tx),
3344 "tx_comp_doorbell_paddr",
3345 (void *)ipa_res->tx_comp_doorbell_paddr);
3346
3347 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3348 "%s: Rx: %s=%pK, %s=%d, %s=%pK, %s=%pK, %s=%d, %s=%pK, %s=%d, %s=%pK",
3349 __func__,
3350 "transfer_ring_base_pa",
3351 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_BASE_PA(rx),
3352 "transfer_ring_size",
3353 QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_SIZE(rx),
3354 "transfer_ring_doorbell_pa",
3355 (void *)QDF_IPA_WDI_SETUP_INFO_TRANSFER_RING_DOORBELL_PA(rx),
3356 "event_ring_base_pa",
3357 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_BASE_PA(rx),
3358 "event_ring_size",
3359 QDF_IPA_WDI_SETUP_INFO_EVENT_RING_SIZE(rx),
3360 "event_ring_doorbell_pa",
3361 (void *)QDF_IPA_WDI_SETUP_INFO_EVENT_RING_DOORBELL_PA(rx),
3362 "num_pkt_buffers",
3363 QDF_IPA_WDI_SETUP_INFO_NUM_PKT_BUFFERS(rx),
3364 "tx_comp_doorbell_paddr",
3365 (void *)ipa_res->rx_ready_doorbell_paddr);
3366
3367 return QDF_STATUS_SUCCESS;
3368 }
3369
dp_ipa_setup_iface(char * ifname,uint8_t * mac_addr,qdf_ipa_client_type_t prod_client,qdf_ipa_client_type_t cons_client,uint8_t session_id,bool is_ipv6_enabled,qdf_ipa_wdi_hdl_t hdl)3370 QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
3371 qdf_ipa_client_type_t prod_client,
3372 qdf_ipa_client_type_t cons_client,
3373 uint8_t session_id, bool is_ipv6_enabled,
3374 qdf_ipa_wdi_hdl_t hdl)
3375 {
3376 qdf_ipa_wdi_reg_intf_in_params_t in;
3377 qdf_ipa_wdi_hdr_info_t hdr_info;
3378 struct dp_ipa_uc_tx_hdr uc_tx_hdr;
3379 struct dp_ipa_uc_tx_hdr uc_tx_hdr_v6;
3380 int ret = -EINVAL;
3381
3382 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
3383 "%s: Add Partial hdr: %s, "QDF_MAC_ADDR_FMT,
3384 __func__, ifname, QDF_MAC_ADDR_REF(mac_addr));
3385
3386 qdf_mem_zero(&hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
3387 qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
3388
3389 /* IPV4 header */
3390 uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
3391
3392 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr;
3393 QDF_IPA_WDI_HDR_INFO_HDR_LEN(&hdr_info) = DP_IPA_UC_WLAN_TX_HDR_LEN;
3394 QDF_IPA_WDI_HDR_INFO_HDR_TYPE(&hdr_info) = IPA_HDR_L2_ETHERNET_II;
3395 QDF_IPA_WDI_HDR_INFO_DST_MAC_ADDR_OFFSET(&hdr_info) =
3396 DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
3397
3398 QDF_IPA_WDI_REG_INTF_IN_PARAMS_NETDEV_NAME(&in) = ifname;
3399 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v4]),
3400 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
3401 QDF_IPA_WDI_REG_INTF_IN_PARAMS_IS_META_DATA_VALID(&in) = 1;
3402 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA(&in) =
3403 htonl(session_id << 16);
3404 QDF_IPA_WDI_REG_INTF_IN_PARAMS_META_DATA_MASK(&in) = htonl(0x00FF0000);
3405
3406 /* IPV6 header */
3407 if (is_ipv6_enabled) {
3408 qdf_mem_copy(&uc_tx_hdr_v6, &uc_tx_hdr,
3409 DP_IPA_UC_WLAN_TX_HDR_LEN);
3410 uc_tx_hdr_v6.eth.h_proto = qdf_htons(ETH_P_IPV6);
3411 QDF_IPA_WDI_HDR_INFO_HDR(&hdr_info) = (uint8_t *)&uc_tx_hdr_v6;
3412 qdf_mem_copy(&(QDF_IPA_WDI_REG_INTF_IN_PARAMS_HDR_INFO(&in)[IPA_IP_v6]),
3413 &hdr_info, sizeof(qdf_ipa_wdi_hdr_info_t));
3414 }
3415
3416 ret = qdf_ipa_wdi_reg_intf(&in);
3417 if (ret) {
3418 dp_err("ipa_wdi_reg_intf: register IPA interface failed: ret=%d",
3419 ret);
3420 return QDF_STATUS_E_FAILURE;
3421 }
3422
3423 return QDF_STATUS_SUCCESS;
3424 }
3425
3426 #endif /* CONFIG_IPA_WDI_UNIFIED_API */
3427
dp_ipa_cleanup(struct cdp_soc_t * soc_hdl,uint8_t pdev_id,uint32_t tx_pipe_handle,uint32_t rx_pipe_handle,qdf_ipa_wdi_hdl_t hdl)3428 QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3429 uint32_t tx_pipe_handle, uint32_t rx_pipe_handle,
3430 qdf_ipa_wdi_hdl_t hdl)
3431 {
3432 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3433 QDF_STATUS status = QDF_STATUS_SUCCESS;
3434 struct dp_pdev *pdev;
3435 int ret;
3436
3437 ret = qdf_ipa_wdi_disconn_pipes(hdl);
3438 if (ret) {
3439 dp_err("ipa_wdi_disconn_pipes: IPA pipe cleanup failed: ret=%d",
3440 ret);
3441 status = QDF_STATUS_E_FAILURE;
3442 }
3443
3444 if (soc->ipa_rx_buf_map_lock_initialized) {
3445 qdf_spinlock_destroy(&soc->ipa_rx_buf_map_lock);
3446 soc->ipa_rx_buf_map_lock_initialized = false;
3447 }
3448
3449 pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3450 if (qdf_unlikely(!pdev)) {
3451 dp_err_rl("Invalid pdev for pdev_id %d", pdev_id);
3452 status = QDF_STATUS_E_FAILURE;
3453 goto exit;
3454 }
3455
3456 dp_ipa_unmap_ring_doorbell_paddr(pdev);
3457 dp_ipa_unmap_rx_alt_ring_doorbell_paddr(pdev);
3458 exit:
3459 return status;
3460 }
3461
dp_ipa_cleanup_iface(char * ifname,bool is_ipv6_enabled,qdf_ipa_wdi_hdl_t hdl)3462 QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled,
3463 qdf_ipa_wdi_hdl_t hdl)
3464 {
3465 int ret;
3466
3467 ret = qdf_ipa_wdi_dereg_intf(ifname, hdl);
3468 if (ret) {
3469 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3470 "%s: ipa_wdi_dereg_intf: IPA pipe deregistration failed: ret=%d",
3471 __func__, ret);
3472 return QDF_STATUS_E_FAILURE;
3473 }
3474
3475 return QDF_STATUS_SUCCESS;
3476 }
3477
3478 #ifdef IPA_SET_RESET_TX_DB_PA
3479 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res) \
3480 dp_ipa_set_tx_doorbell_paddr((soc), (ipa_res))
3481 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res) \
3482 dp_ipa_reset_tx_doorbell_pa((soc), (ipa_res))
3483 #else
3484 #define DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res)
3485 #define DP_IPA_RESET_TX_DB_PA(soc, ipa_res)
3486 #endif
3487
/**
 * dp_ipa_enable_pipes() - Enable the previously connected IPA WDI pipes
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @hdl: IPA registered handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			       qdf_ipa_wdi_hdl_t hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
	struct dp_ipa_resources *ipa_res;
	QDF_STATUS result;

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	ipa_res = &pdev->ipa_resource;

	/* State set here is unwound below if the IPA enable call fails */
	qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
	DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res);

	/* SMMU-map the rx buffer pool for IPA unless opt wifi dp is on */
	if (!ipa_config_is_opt_wifi_dp_enabled()) {
		qdf_atomic_set(&soc->ipa_map_allowed, 1);
		dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true,
						       __func__, __LINE__);
	}

	result = qdf_ipa_wdi_enable_pipes(hdl);
	if (result) {
		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
			  "%s: Enable WDI PIPE fail, code %d",
			  __func__, result);
		/* Roll back the enable-side state set above */
		qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
		DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
		if (qdf_atomic_read(&soc->ipa_map_allowed)) {
			qdf_atomic_set(&soc->ipa_map_allowed, 0);
			dp_ipa_handle_rx_buf_pool_smmu_mapping(
					soc, pdev, false, __func__, __LINE__);
		}
		return QDF_STATUS_E_FAILURE;
	}

	/* First enable after setup initializes the tx comp ring HP;
	 * later enables only update the HP address.
	 */
	if (soc->ipa_first_tx_db_access) {
		dp_ipa_tx_comp_ring_init_hp(soc, ipa_res);
		soc->ipa_first_tx_db_access = false;
	} else {
		dp_ipa_tx_comp_ring_update_hp_addr(soc, ipa_res);
	}

	return QDF_STATUS_SUCCESS;
}
3537
dp_ipa_disable_pipes(struct cdp_soc_t * soc_hdl,uint8_t pdev_id,qdf_ipa_wdi_hdl_t hdl)3538 QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3539 qdf_ipa_wdi_hdl_t hdl)
3540 {
3541 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3542 struct dp_pdev *pdev =
3543 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3544 QDF_STATUS result;
3545 struct dp_ipa_resources *ipa_res;
3546
3547 if (!pdev) {
3548 dp_err("Invalid instance");
3549 return QDF_STATUS_E_FAILURE;
3550 }
3551
3552 ipa_res = &pdev->ipa_resource;
3553
3554 qdf_sleep(TX_COMP_DRAIN_WAIT_TIMEOUT_MS);
3555 /*
3556 * Reset the tx completion doorbell address before invoking IPA disable
3557 * pipes API to ensure that there is no access to IPA tx doorbell
3558 * address post disable pipes.
3559 */
3560 DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
3561
3562 result = qdf_ipa_wdi_disable_pipes(hdl);
3563 if (result) {
3564 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3565 "%s: Disable WDI PIPE fail, code %d",
3566 __func__, result);
3567 qdf_assert_always(0);
3568 return QDF_STATUS_E_FAILURE;
3569 }
3570
3571 qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
3572
3573 if (!ipa_config_is_opt_wifi_dp_enabled()) {
3574 qdf_atomic_set(&soc->ipa_map_allowed, 0);
3575 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false,
3576 __func__, __LINE__);
3577 }
3578
3579 return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
3580 }
3581
dp_ipa_set_perf_level(int client,uint32_t max_supported_bw_mbps,qdf_ipa_wdi_hdl_t hdl)3582 QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps,
3583 qdf_ipa_wdi_hdl_t hdl)
3584 {
3585 qdf_ipa_wdi_perf_profile_t profile;
3586 QDF_STATUS result;
3587
3588 profile.client = client;
3589 profile.max_supported_bw_mbps = max_supported_bw_mbps;
3590
3591 result = qdf_ipa_wdi_set_perf_profile(hdl, &profile);
3592 if (result) {
3593 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
3594 "%s: ipa_wdi_set_perf_profile fail, code %d",
3595 __func__, result);
3596 return QDF_STATUS_E_FAILURE;
3597 }
3598
3599 return QDF_STATUS_SUCCESS;
3600 }
3601
3602 #ifdef QCA_SUPPORT_WDS_EXTENDED
3603 /**
3604 * dp_ipa_rx_wdsext_iface() - Forward RX exception packets to wdsext interface
3605 * @soc_hdl: data path soc handle
3606 * @peer_id: Peer id to get respective peer
3607 * @skb: socket buffer
3608 *
3609 * Return: true on success, else false
3610 */
dp_ipa_rx_wdsext_iface(struct cdp_soc_t * soc_hdl,uint8_t peer_id,qdf_nbuf_t skb)3611 bool dp_ipa_rx_wdsext_iface(struct cdp_soc_t *soc_hdl, uint8_t peer_id,
3612 qdf_nbuf_t skb)
3613 {
3614 struct dp_txrx_peer *txrx_peer;
3615 dp_txrx_ref_handle txrx_ref_handle = NULL;
3616 struct dp_soc *dp_soc = cdp_soc_t_to_dp_soc(soc_hdl);
3617 bool status = false;
3618
3619 txrx_peer = dp_tgt_txrx_peer_get_ref_by_id(soc_hdl, peer_id,
3620 &txrx_ref_handle,
3621 DP_MOD_ID_IPA);
3622
3623 if (qdf_likely(txrx_peer)) {
3624 if (dp_rx_deliver_to_stack_ext(dp_soc, txrx_peer->vdev,
3625 txrx_peer, skb)
3626 status = true;
3627 dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_IPA);
3628 }
3629 return status;
3630 }
3631 #endif
3632
3633 /**
3634 * dp_ipa_intrabss_send() - send IPA RX intra-bss frames
3635 * @pdev: pdev
3636 * @vdev: vdev
3637 * @nbuf: skb
3638 *
3639 * Return: nbuf if TX fails and NULL if TX succeeds
3640 */
3641 static qdf_nbuf_t dp_ipa_intrabss_send(struct dp_pdev *pdev,
3642 struct dp_vdev *vdev,
3643 qdf_nbuf_t nbuf)
3644 {
3645 struct dp_peer *vdev_peer;
3646 uint16_t len;
3647
3648 vdev_peer = dp_vdev_bss_peer_ref_n_get(pdev->soc, vdev, DP_MOD_ID_IPA);
3649 if (qdf_unlikely(!vdev_peer))
3650 return nbuf;
3651
3652 if (qdf_unlikely(!vdev_peer->txrx_peer)) {
3653 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
3654 return nbuf;
3655 }
3656
3657 qdf_mem_zero(nbuf->cb, sizeof(nbuf->cb));
3658 len = qdf_nbuf_len(nbuf);
3659
3660 if (dp_tx_send((struct cdp_soc_t *)pdev->soc, vdev->vdev_id, nbuf)) {
3661 DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer,
3662 rx.intra_bss.fail, 1, len,
3663 0);
3664 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
3665 return nbuf;
3666 }
3667
3668 DP_PEER_PER_PKT_STATS_INC_PKT(vdev_peer->txrx_peer,
3669 rx.intra_bss.pkts, 1, len, 0);
3670 dp_peer_unref_delete(vdev_peer, DP_MOD_ID_IPA);
3671 return NULL;
3672 }
3673
3674 #ifdef IPA_OPT_WIFI_DP
3675 /**
3676 * dp_ipa_rx_super_rule_setup()- pass cce super rule params to fw from ipa
3677 *
3678 * @soc_hdl: cdp soc
3679 * @flt_params: filter tuple
3680 *
3681 * Return: QDF_STATUS
3682 */
3683 QDF_STATUS dp_ipa_rx_super_rule_setup(struct cdp_soc_t *soc_hdl,
3684 void *flt_params)
3685 {
3686 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3687
3688 return htt_h2t_rx_cce_super_rule_setup(soc->htt_handle, flt_params);
3689 }
3690
/**
 * dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb() - relay CCE super rule filter
 * add/remove result to the IPA component
 * @flt0_rslt: result for filter0 add/remove
 * @flt1_rslt: result for filter1 add/remove
 *
 * Thin pass-through into the wlan_ipa layer.
 *
 * Return: void
 */
void dp_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(int flt0_rslt, int flt1_rslt)
{
	wlan_ipa_wdi_opt_dpath_notify_flt_add_rem_cb(flt0_rslt, flt1_rslt);
}
3704
3705 int dp_ipa_pcie_link_up(struct cdp_soc_t *soc_hdl)
3706 {
3707 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3708 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
3709 int response = 0;
3710
3711 response = hif_prevent_l1((hal_soc->hif_handle));
3712 return response;
3713 }
3714
3715 void dp_ipa_pcie_link_down(struct cdp_soc_t *soc_hdl)
3716 {
3717 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3718 struct hal_soc *hal_soc = (struct hal_soc *)soc->hal_soc;
3719
3720 hif_allow_l1(hal_soc->hif_handle);
3721 }
3722
/**
 * dp_ipa_wdi_opt_dpath_notify_flt_rlsd() - relay CCE super rule release
 * notification to the IPA component
 * @flt0_rslt: result for filter0 release
 * @flt1_rslt: result for filter1 release
 *
 * Thin pass-through into the wlan_ipa layer.
 *
 * Return: void
 */
void dp_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt, int flt1_rslt)
{
	wlan_ipa_wdi_opt_dpath_notify_flt_rlsd(flt0_rslt, flt1_rslt);
}
3736
3737 /**
3738 * dp_ipa_wdi_opt_dpath_notify_flt_rsvd()- send cce super rule reserve
3739 * notification to ipa
3740 *
3741 *@is_success : result of filter reservatiom
3742 *
3743 *Return: void
3744 */
3745 void dp_ipa_wdi_opt_dpath_notify_flt_rsvd(bool is_success)
3746 {
3747 wlan_ipa_wdi_opt_dpath_notify_flt_rsvd(is_success);
3748 }
3749 #endif
3750
3751 #ifdef IPA_WDS_EASYMESH_FEATURE
3752 /**
3753 * dp_ipa_peer_check() - Check for peer for given mac
3754 * @soc: dp soc object
3755 * @peer_mac_addr: peer mac address
3756 * @vdev_id: vdev id
3757 *
3758 * Return: true if peer is found, else false
3759 */
3760 static inline bool dp_ipa_peer_check(struct dp_soc *soc,
3761 uint8_t *peer_mac_addr, uint8_t vdev_id)
3762 {
3763 struct dp_ast_entry *ast_entry = NULL;
3764 struct dp_peer *peer = NULL;
3765
3766 qdf_spin_lock_bh(&soc->ast_lock);
3767 ast_entry = dp_peer_ast_hash_find_soc(soc, peer_mac_addr);
3768
3769 if ((!ast_entry) ||
3770 (ast_entry->delete_in_progress && !ast_entry->callback)) {
3771 qdf_spin_unlock_bh(&soc->ast_lock);
3772 return false;
3773 }
3774
3775 peer = dp_peer_get_ref_by_id(soc, ast_entry->peer_id,
3776 DP_MOD_ID_IPA);
3777
3778 if (!peer) {
3779 qdf_spin_unlock_bh(&soc->ast_lock);
3780 return false;
3781 } else {
3782 if (peer->vdev->vdev_id == vdev_id) {
3783 dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
3784 qdf_spin_unlock_bh(&soc->ast_lock);
3785 return true;
3786 }
3787 dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
3788 qdf_spin_unlock_bh(&soc->ast_lock);
3789 return false;
3790 }
3791 }
3792 #else
3793 static inline bool dp_ipa_peer_check(struct dp_soc *soc,
3794 uint8_t *peer_mac_addr, uint8_t vdev_id)
3795 {
3796 struct cdp_peer_info peer_info = {0};
3797 struct dp_peer *peer = NULL;
3798
3799 DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac_addr, false,
3800 CDP_WILD_PEER_TYPE);
3801
3802 peer = dp_peer_hash_find_wrapper(soc, &peer_info, DP_MOD_ID_IPA);
3803 if (peer) {
3804 dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
3805 return true;
3806 } else {
3807 return false;
3808 }
3809 }
3810 #endif
3811
/**
 * dp_ipa_rx_intrabss_fwd() - Attempt intra-BSS forwarding of an IPA RX frame
 * @soc_hdl: CDP soc handle
 * @vdev_id: id of the vdev the frame arrived on
 * @nbuf: received frame (allocated by the IPA driver)
 * @fwd_success: out param, set true when the frame (or its copy) was sent
 *
 * Broadcast/multicast frames are cloned: the copy is transmitted and the
 * original is left for the stack (function returns false).  Unicast frames
 * whose source and destination peers both exist on this vdev take ownership
 * of @nbuf for TX (function returns true).
 *
 * Return: true when @nbuf was consumed by forwarding, false when the caller
 * must still deliver it to the stack
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
						     DP_MOD_ID_IPA);
	struct dp_pdev *pdev;
	qdf_nbuf_t nbuf_copy;
	uint8_t da_is_bcmc;
	struct ethhdr *eh;
	bool status = false;

	*fwd_success = false; /* set default as failure */

	/*
	 * WDI 3.0 skb->cb[] info from IPA driver
	 * skb->cb[0] = vdev_id
	 * skb->cb[1].bit#1 = da_is_bcmc
	 */
	da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;

	if (qdf_unlikely(!vdev))
		return false;

	pdev = vdev->pdev;
	if (qdf_unlikely(!pdev))
		goto out;

	/* no fwd for station mode and just pass up to stack */
	if (vdev->opmode == wlan_op_mode_sta)
		goto out;

	if (da_is_bcmc) {
		/* TX a copy; the original always goes up to the stack */
		nbuf_copy = qdf_nbuf_copy(nbuf);
		if (!nbuf_copy)
			goto out;

		if (dp_ipa_intrabss_send(pdev, vdev, nbuf_copy))
			qdf_nbuf_free(nbuf_copy);
		else
			*fwd_success = true;

		/* return false to pass original pkt up to stack */
		goto out;
	}

	eh = (struct ethhdr *)qdf_nbuf_data(nbuf);

	/* Frames addressed to the vdev itself are for the local stack */
	if (!qdf_mem_cmp(eh->h_dest, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
		goto out;

	/* Forward only when both endpoints are peers of this vdev */
	if (!dp_ipa_peer_check(soc, eh->h_dest, vdev->vdev_id))
		goto out;

	if (!dp_ipa_peer_check(soc, eh->h_source, vdev->vdev_id))
		goto out;

	/*
	 * In intra-bss forwarding scenario, skb is allocated by IPA driver.
	 * Need to add skb to internal tracking table to avoid nbuf memory
	 * leak check for unallocated skb.
	 */
	qdf_net_buf_debug_acquire_skb(nbuf, __FILE__, __LINE__);

	if (dp_ipa_intrabss_send(pdev, vdev, nbuf))
		qdf_nbuf_free(nbuf);
	else
		*fwd_success = true;

	status = true;
out:
	dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
	return status;
}
3886
3887 #ifdef MDM_PLATFORM
/* MDM (modem/router) builds: compiled in when MDM_PLATFORM is defined */
bool dp_ipa_is_mdm_platform(void)
{
	return true;
}
3892 #else
/* Non-MDM builds: this platform is not an MDM target */
bool dp_ipa_is_mdm_platform(void)
{
	return false;
}
3897 #endif
3898
/**
 * dp_ipa_frag_nbuf_linearize() - linearize a fragmented nbuf for IPA
 * @soc: soc
 * @nbuf: source skb (head + frag/ext list)
 *
 * Allocates one RX-sized buffer and copies the head segment (RX TLVs,
 * then payload shifted by L3_HEADER_PADDING) followed by every chained
 * segment into it.  Frames that would not fit one RX buffer are rejected.
 * On success the original chain is freed and ownership of the new buffer
 * passes to the caller.
 *
 * Return: new nbuf if success and otherwise NULL
 */
static qdf_nbuf_t dp_ipa_frag_nbuf_linearize(struct dp_soc *soc,
					     qdf_nbuf_t nbuf)
{
	uint8_t *src_nbuf_data;
	uint8_t *dst_nbuf_data;
	qdf_nbuf_t dst_nbuf;
	qdf_nbuf_t temp_nbuf = nbuf;
	uint32_t nbuf_len = qdf_nbuf_len(nbuf);
	bool is_nbuf_head = true;
	uint32_t copy_len = 0;
	uint16_t buf_size;

	buf_size = wlan_cfg_rx_buffer_size(soc->wlan_cfg_ctx);

	dst_nbuf = qdf_nbuf_alloc(soc->osdev, buf_size,
				  RX_BUFFER_RESERVATION,
				  RX_DATA_BUFFER_ALIGNMENT, FALSE);

	if (!dst_nbuf) {
		dp_err_rl("nbuf allocate fail");
		return NULL;
	}

	/* Whole frame must fit a single RX buffer after padding */
	if ((nbuf_len + L3_HEADER_PADDING) > buf_size) {
		qdf_nbuf_free(dst_nbuf);
		dp_err_rl("nbuf is jumbo data");
		return NULL;
	}

	/* prepare to copy all data into new skb */
	dst_nbuf_data = qdf_nbuf_data(dst_nbuf);
	while (temp_nbuf) {
		src_nbuf_data = qdf_nbuf_data(temp_nbuf);
		/* first head nbuf */
		if (is_nbuf_head) {
			/* Copy RX TLVs verbatim at the start of the buffer */
			qdf_mem_copy(dst_nbuf_data, src_nbuf_data,
				     soc->rx_pkt_tlv_size);
			/* leave extra 2 bytes L3_HEADER_PADDING */
			dst_nbuf_data += (soc->rx_pkt_tlv_size +
					  L3_HEADER_PADDING);
			src_nbuf_data += soc->rx_pkt_tlv_size;
			copy_len = qdf_nbuf_headlen(temp_nbuf) -
						soc->rx_pkt_tlv_size;
			temp_nbuf = qdf_nbuf_get_ext_list(temp_nbuf);
			is_nbuf_head = false;
		} else {
			copy_len = qdf_nbuf_len(temp_nbuf);
			temp_nbuf = qdf_nbuf_queue_next(temp_nbuf);
		}
		qdf_mem_copy(dst_nbuf_data, src_nbuf_data, copy_len);
		dst_nbuf_data += copy_len;
	}

	qdf_nbuf_set_len(dst_nbuf, nbuf_len);
	/* copy is done, free original nbuf */
	qdf_nbuf_free(nbuf);

	return dst_nbuf;
}
3965
3966 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
3967 {
3968
3969 if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
3970 return nbuf;
3971
3972 /* WLAN IPA is run-time disabled */
3973 if (!qdf_atomic_read(&soc->ipa_pipes_enabled))
3974 return nbuf;
3975
3976 if (!qdf_nbuf_is_frag(nbuf))
3977 return nbuf;
3978
3979 /* linearize skb for IPA */
3980 return dp_ipa_frag_nbuf_linearize(soc, nbuf);
3981 }
3982
3983 QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
3984 struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
3985 const char *func, uint32_t line)
3986 {
3987 QDF_STATUS ret;
3988
3989 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
3990 struct dp_pdev *pdev =
3991 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
3992
3993 if (!pdev) {
3994 dp_err("Invalid instance");
3995 return QDF_STATUS_E_FAILURE;
3996 }
3997
3998 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
3999 dp_debug("SMMU S1 disabled");
4000 return QDF_STATUS_SUCCESS;
4001 }
4002 ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true, func, line);
4003 if (ret)
4004 return ret;
4005
4006 ret = dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, true, func, line);
4007 if (ret)
4008 __dp_ipa_tx_buf_smmu_mapping(soc, pdev, false, func, line);
4009 return ret;
4010 }
4011
4012 QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
4013 struct cdp_soc_t *soc_hdl, uint8_t pdev_id, const char *func,
4014 uint32_t line)
4015 {
4016 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4017 struct dp_pdev *pdev =
4018 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
4019
4020 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
4021 dp_debug("SMMU S1 disabled");
4022 return QDF_STATUS_SUCCESS;
4023 }
4024
4025 if (!pdev) {
4026 dp_err("Invalid pdev instance pdev_id:%d", pdev_id);
4027 return QDF_STATUS_E_FAILURE;
4028 }
4029
4030 if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false, func, line) ||
4031 dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false, func, line))
4032 return QDF_STATUS_E_FAILURE;
4033
4034 return QDF_STATUS_SUCCESS;
4035 }
4036
4037 QDF_STATUS dp_ipa_rx_buf_pool_smmu_mapping(
4038 struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
4039 bool create, const char *func, uint32_t line)
4040 {
4041 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4042 struct dp_pdev *pdev =
4043 dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
4044
4045 if (!pdev) {
4046 dp_err("Invalid instance");
4047 return QDF_STATUS_E_FAILURE;
4048 }
4049
4050 if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
4051 dp_debug("SMMU S1 disabled");
4052 return QDF_STATUS_SUCCESS;
4053 }
4054
4055 dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, create, func, line);
4056 return QDF_STATUS_SUCCESS;
4057 }
4058 #ifdef IPA_WDS_EASYMESH_FEATURE
4059 QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
4060 qdf_ipa_ast_info_type_t *data)
4061 {
4062 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4063 uint8_t *rx_tlv_hdr;
4064 struct dp_peer *peer;
4065 struct hal_rx_msdu_metadata msdu_metadata;
4066 qdf_ipa_ast_info_type_t *ast_info;
4067
4068 if (!data) {
4069 dp_err("Data is NULL !!!");
4070 return QDF_STATUS_E_FAILURE;
4071 }
4072 ast_info = data;
4073
4074 rx_tlv_hdr = qdf_nbuf_data(ast_info->skb);
4075 peer = dp_peer_get_ref_by_id(soc, ast_info->ta_peer_id,
4076 DP_MOD_ID_IPA);
4077 if (!peer) {
4078 dp_err("Peer is NULL !!!!");
4079 return QDF_STATUS_E_FAILURE;
4080 }
4081
4082 hal_rx_msdu_metadata_get(soc->hal_soc, rx_tlv_hdr, &msdu_metadata);
4083
4084 dp_rx_ipa_wds_srcport_learn(soc, peer, ast_info->skb, msdu_metadata,
4085 ast_info->mac_addr_ad4_valid,
4086 ast_info->first_msdu_in_mpdu_flag);
4087
4088 dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
4089
4090 return QDF_STATUS_SUCCESS;
4091 }
4092 #endif
4093
4094 #ifdef QCA_ENHANCED_STATS_SUPPORT
4095 QDF_STATUS dp_ipa_update_peer_rx_stats(struct cdp_soc_t *soc,
4096 uint8_t vdev_id, uint8_t *peer_mac,
4097 qdf_nbuf_t nbuf)
4098 {
4099 struct dp_peer *peer = dp_peer_find_hash_find((struct dp_soc *)soc,
4100 peer_mac, 0, vdev_id,
4101 DP_MOD_ID_IPA);
4102 struct dp_txrx_peer *txrx_peer;
4103 uint8_t da_is_bcmc;
4104 qdf_ether_header_t *eh;
4105
4106 if (!peer)
4107 return QDF_STATUS_E_FAILURE;
4108
4109 txrx_peer = dp_get_txrx_peer(peer);
4110
4111 if (!txrx_peer) {
4112 dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
4113 return QDF_STATUS_E_FAILURE;
4114 }
4115
4116 da_is_bcmc = ((uint8_t)nbuf->cb[1]) & 0x2;
4117 eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
4118
4119 if (da_is_bcmc) {
4120 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.multicast, 1,
4121 qdf_nbuf_len(nbuf), 0);
4122 if (QDF_IS_ADDR_BROADCAST(eh->ether_dhost))
4123 DP_PEER_PER_PKT_STATS_INC_PKT(txrx_peer, rx.bcast,
4124 1, qdf_nbuf_len(nbuf), 0);
4125 }
4126
4127 dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
4128
4129 return QDF_STATUS_SUCCESS;
4130 }
4131
4132 void
4133 dp_peer_aggregate_tid_stats(struct dp_peer *peer)
4134 {
4135 uint8_t i = 0;
4136 struct dp_rx_tid *rx_tid = NULL;
4137 struct cdp_pkt_info rx_total = {0};
4138 struct dp_txrx_peer *txrx_peer = NULL;
4139
4140 if (!peer->rx_tid)
4141 return;
4142
4143 txrx_peer = dp_get_txrx_peer(peer);
4144
4145 if (!txrx_peer)
4146 return;
4147
4148 for (i = 0; i < DP_MAX_TIDS; i++) {
4149 rx_tid = &peer->rx_tid[i];
4150 rx_total.num += rx_tid->rx_msdu_cnt.num;
4151 rx_total.bytes += rx_tid->rx_msdu_cnt.bytes;
4152 }
4153
4154 DP_PEER_PER_PKT_STATS_UPD(txrx_peer, rx.rx_total.num,
4155 rx_total.num, 0);
4156 DP_PEER_PER_PKT_STATS_UPD(txrx_peer, rx.rx_total.bytes,
4157 rx_total.bytes, 0);
4158 }
4159
/**
 * dp_ipa_update_vdev_stats() - per-peer iterator: fold peer stats into vdev
 * @soc: soc handle
 * @srcobj: DP_PEER object being visited
 * @arg: points to the vdev stats structure being accumulated
 *
 * First refreshes the peer's TID-level RX totals, then merges the peer
 * into the vdev-level aggregate.
 *
 * Return: void
 */
static inline
void dp_ipa_update_vdev_stats(struct dp_soc *soc, struct dp_peer *srcobj,
			      void *arg)
{
	dp_peer_aggregate_tid_stats(srcobj);
	dp_update_vdev_stats(soc, srcobj, arg);
}
4175
/**
 * dp_ipa_aggregate_vdev_stats - Aggregate vdev_stats
 * @vdev: Data path vdev
 * @vdev_stats: buffer to hold vdev stats
 *
 * Gathers ingress, per-peer and rate stats into @vdev_stats, then derives
 * the externally reported counters: TX ucast/success from the ucast
 * totals, and RX unicast as (total - multicast) with everything received
 * reported as delivered to the stack.
 *
 * Return: void
 */
static inline
void dp_ipa_aggregate_vdev_stats(struct dp_vdev *vdev,
				 struct cdp_vdev_stats *vdev_stats)
{
	struct dp_soc *soc = NULL;

	if (!vdev || !vdev->pdev)
		return;

	soc = vdev->pdev->soc;
	dp_update_vdev_ingress_stats(vdev);
	dp_copy_vdev_stats_to_tgt_buf(vdev_stats, &vdev->stats, DP_XMIT_LINK);
	dp_vdev_iterate_peer(vdev, dp_ipa_update_vdev_stats, vdev_stats,
			     DP_MOD_ID_GENERIC_STATS);
	dp_update_vdev_rate_stats(vdev_stats, &vdev->stats);

	/* Report the ucast totals as the externally visible TX counters */
	vdev_stats->tx.ucast.num = vdev_stats->tx.tx_ucast_total.num;
	vdev_stats->tx.ucast.bytes = vdev_stats->tx.tx_ucast_total.bytes;
	vdev_stats->tx.tx_success.num = vdev_stats->tx.tx_ucast_success.num;
	vdev_stats->tx.tx_success.bytes = vdev_stats->tx.tx_ucast_success.bytes;

	/* Derive unicast RX by subtraction, guarding against underflow */
	if (vdev_stats->rx.rx_total.num >= vdev_stats->rx.multicast.num)
		vdev_stats->rx.unicast.num = vdev_stats->rx.rx_total.num -
						vdev_stats->rx.multicast.num;
	if (vdev_stats->rx.rx_total.bytes >= vdev_stats->rx.multicast.bytes)
		vdev_stats->rx.unicast.bytes = vdev_stats->rx.rx_total.bytes -
						vdev_stats->rx.multicast.bytes;
	vdev_stats->rx.to_stack.num = vdev_stats->rx.rx_total.num;
	vdev_stats->rx.to_stack.bytes = vdev_stats->rx.rx_total.bytes;
}
4213
4214 /**
4215 * dp_ipa_aggregate_pdev_stats - Aggregate pdev stats
4216 * @pdev: Data path pdev
4217 *
4218 * Return: void
4219 */
4220 static inline
4221 void dp_ipa_aggregate_pdev_stats(struct dp_pdev *pdev)
4222 {
4223 struct dp_vdev *vdev = NULL;
4224 struct dp_soc *soc;
4225 struct cdp_vdev_stats *vdev_stats =
4226 qdf_mem_malloc_atomic(sizeof(struct cdp_vdev_stats));
4227
4228 if (!vdev_stats) {
4229 dp_err("%pK: DP alloc failure - unable to get alloc vdev stats",
4230 pdev->soc);
4231 return;
4232 }
4233
4234 soc = pdev->soc;
4235
4236 qdf_mem_zero(&pdev->stats.tx, sizeof(pdev->stats.tx));
4237 qdf_mem_zero(&pdev->stats.rx, sizeof(pdev->stats.rx));
4238 qdf_mem_zero(&pdev->stats.tx_i, sizeof(pdev->stats.tx_i));
4239 qdf_mem_zero(&pdev->stats.rx_i, sizeof(pdev->stats.rx_i));
4240
4241 qdf_spin_lock_bh(&pdev->vdev_list_lock);
4242 TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
4243 dp_ipa_aggregate_vdev_stats(vdev, vdev_stats);
4244 dp_update_pdev_stats(pdev, vdev_stats);
4245 dp_update_pdev_ingress_stats(pdev, vdev);
4246 }
4247 qdf_spin_unlock_bh(&pdev->vdev_list_lock);
4248 qdf_mem_free(vdev_stats);
4249 }
4250
/**
 * dp_ipa_get_peer_stats - Get peer stats
 * @peer: Data path peer
 * @peer_stats: buffer to hold peer stats
 *
 * Refreshes the peer's TID-level RX totals, fetches the full peer stats,
 * then derives the externally reported counters the same way the vdev
 * aggregation does: TX success/ucast from the ucast counters and RX
 * unicast as (total - multicast).
 *
 * Return: void
 */
static
void dp_ipa_get_peer_stats(struct dp_peer *peer,
			   struct cdp_peer_stats *peer_stats)
{
	dp_peer_aggregate_tid_stats(peer);
	dp_get_peer_stats(peer, peer_stats);

	/* Expose the ucast counters as the generic TX success/ucast stats */
	peer_stats->tx.tx_success.num =
			peer_stats->tx.tx_ucast_success.num;
	peer_stats->tx.tx_success.bytes =
			peer_stats->tx.tx_ucast_success.bytes;
	peer_stats->tx.ucast.num =
			peer_stats->tx.tx_ucast_total.num;
	peer_stats->tx.ucast.bytes =
			peer_stats->tx.tx_ucast_total.bytes;

	/* Derive unicast RX by subtraction, guarding against underflow */
	if (peer_stats->rx.rx_total.num >= peer_stats->rx.multicast.num)
		peer_stats->rx.unicast.num = peer_stats->rx.rx_total.num -
						peer_stats->rx.multicast.num;

	if (peer_stats->rx.rx_total.bytes >= peer_stats->rx.multicast.bytes)
		peer_stats->rx.unicast.bytes = peer_stats->rx.rx_total.bytes -
						peer_stats->rx.multicast.bytes;
}
4282
4283 QDF_STATUS
4284 dp_ipa_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
4285 struct cdp_pdev_stats *pdev_stats)
4286 {
4287 struct dp_pdev *pdev =
4288 dp_get_pdev_from_soc_pdev_id_wifi3((struct dp_soc *)soc,
4289 pdev_id);
4290 if (!pdev)
4291 return QDF_STATUS_E_FAILURE;
4292
4293 dp_ipa_aggregate_pdev_stats(pdev);
4294 qdf_mem_copy(pdev_stats, &pdev->stats, sizeof(struct cdp_pdev_stats));
4295
4296 return QDF_STATUS_SUCCESS;
4297 }
4298
4299 int dp_ipa_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
4300 void *buf, bool is_aggregate)
4301 {
4302 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4303 struct cdp_vdev_stats *vdev_stats;
4304 struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
4305 DP_MOD_ID_IPA);
4306
4307 if (!vdev)
4308 return 1;
4309
4310 vdev_stats = (struct cdp_vdev_stats *)buf;
4311 dp_ipa_aggregate_vdev_stats(vdev, buf);
4312 dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_IPA);
4313
4314 return 0;
4315 }
4316
4317 QDF_STATUS dp_ipa_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
4318 uint8_t *peer_mac,
4319 struct cdp_peer_stats *peer_stats)
4320 {
4321 struct dp_peer *peer = NULL;
4322 struct cdp_peer_info peer_info = { 0 };
4323
4324 DP_PEER_INFO_PARAMS_INIT(&peer_info, vdev_id, peer_mac, false,
4325 CDP_WILD_PEER_TYPE);
4326
4327 peer = dp_peer_hash_find_wrapper((struct dp_soc *)soc, &peer_info,
4328 DP_MOD_ID_IPA);
4329
4330 qdf_mem_zero(peer_stats, sizeof(struct cdp_peer_stats));
4331
4332 if (!peer)
4333 return QDF_STATUS_E_FAILURE;
4334
4335 dp_ipa_get_peer_stats(peer, peer_stats);
4336 dp_peer_unref_delete(peer, DP_MOD_ID_IPA);
4337
4338 return QDF_STATUS_SUCCESS;
4339 }
4340 #endif
4341
4342 /**
4343 * dp_ipa_get_wdi_version() - Get WDI version
4344 * @soc_hdl: data path soc handle
4345 * @wdi_ver: Out parameter for wdi version
4346 *
4347 * Get WDI version based on soc arch
4348 *
4349 * Return: None
4350 */
4351 void dp_ipa_get_wdi_version(struct cdp_soc_t *soc_hdl, uint8_t *wdi_ver)
4352 {
4353 struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
4354
4355 if (soc->arch_ops.ipa_get_wdi_ver)
4356 soc->arch_ops.ipa_get_wdi_ver(wdi_ver);
4357 else
4358 *wdi_ver = IPA_WDI_3;
4359 }
4360
4361 #ifdef IPA_WDI3_TX_TWO_PIPES
4362 bool dp_ipa_is_ring_ipa_tx(struct dp_soc *soc, uint8_t ring_id)
4363 {
4364 if (!soc->wlan_cfg_ctx->ipa_enabled)
4365 return false;
4366
4367 return (ring_id == IPA_TCL_DATA_RING_IDX) ||
4368 ((ring_id == IPA_TX_ALT_RING_IDX) &&
4369 wlan_cfg_is_ipa_two_tx_pipes_enabled(soc->wlan_cfg_ctx));
4370 }
4371 #else
4372 bool dp_ipa_is_ring_ipa_tx(struct dp_soc *soc, uint8_t ring_id)
4373 {
4374 if (!soc->wlan_cfg_ctx->ipa_enabled)
4375 return false;
4376
4377 return (ring_id == IPA_TCL_DATA_RING_IDX);
4378 }
4379 #endif /* IPA_WDI3_TX_TWO_PIPES */
4380 #endif
4381