/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DP_IPA_H_
#define _DP_IPA_H_

#if defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_KIWI_V2)
/* Index into soc->tcl_data_ring[] */
#define IPA_TCL_DATA_RING_IDX	3
#else
#define IPA_TCL_DATA_RING_IDX	2
#endif
/* Index into soc->tx_comp_ring[] */
#define IPA_TX_COMP_RING_IDX IPA_TCL_DATA_RING_IDX

#ifdef IPA_OFFLOAD

#define DP_IPA_MAX_IFACE	3
#define IPA_REO_DEST_RING_IDX	3
#define IPA_REO_DEST_RING_IDX_2	7

#define IPA_RX_REFILL_BUF_RING_IDX	2

#define IPA_ALT_REO_DEST_RING_IDX	2
#define IPA_RX_ALT_REFILL_BUF_RING_IDX	3

/* Delay before disabling the IPA pipes if any Tx completions are pending */
#define TX_COMP_DRAIN_WAIT_MS	50
#define TX_COMP_DRAIN_WAIT_TIMEOUT_MS	100
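
/*
 * Illustrative only, not taken from dp_ipa.c: a minimal polling sketch of how
 * drain/timeout constants like these are typically consumed before the pipes
 * are disabled. dp_ipa_tx_comp_pending() is a hypothetical predicate used
 * only to show where the two constants fit; the real implementation and its
 * helper names may differ.
 *
 *	int waited_ms = 0;
 *
 *	while (dp_ipa_tx_comp_pending(soc) &&
 *	       waited_ms < TX_COMP_DRAIN_WAIT_TIMEOUT_MS) {
 *		qdf_sleep(TX_COMP_DRAIN_WAIT_MS);
 *		waited_ms += TX_COMP_DRAIN_WAIT_MS;
 *	}
 */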

#ifdef IPA_WDI3_TX_TWO_PIPES
#if defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_KIWI_V2)
/* Index into soc->tcl_data_ring[] and soc->tx_comp_ring[] */
#define IPA_TX_ALT_RING_IDX 4
#define IPA_TX_ALT_COMP_RING_IDX IPA_TX_ALT_RING_IDX
#elif defined(QCA_WIFI_QCN9224)
#define IPA_TX_ALT_RING_IDX 3
#define IPA_TX_ALT_COMP_RING_IDX IPA_TX_ALT_RING_IDX
#else /* !KIWI */
#define IPA_TX_ALT_RING_IDX 1
/*
 * Must be the same as IPA_TX_ALT_RING_IDX, since the TCL and WBM rings
 * are initialized as a pair with the same index.
 */
#define IPA_TX_ALT_COMP_RING_IDX 1
#endif /* KIWI */

#define IPA_SESSION_ID_SHIFT 1
#endif /* IPA_WDI3_TX_TWO_PIPES */

/**
 * struct dp_ipa_uc_tx_hdr - full tx header registered to IPA hardware
 * @eth:     ether II header
 */
struct dp_ipa_uc_tx_hdr {
	struct ethhdr eth;
} __packed;

/**
 * struct dp_ipa_uc_tx_vlan_hdr - full tx header registered to IPA hardware
 * @eth:     802.1Q VLAN Ethernet header
 */
struct dp_ipa_uc_tx_vlan_hdr {
	struct vlan_ethhdr eth;
} __packed;

/**
 * struct dp_ipa_uc_rx_hdr - full rx header registered to IPA hardware
 * @eth:     ether II header
 */
struct dp_ipa_uc_rx_hdr {
	struct ethhdr eth;
} __packed;

#define DP_IPA_UC_WLAN_TX_HDR_LEN      sizeof(struct dp_ipa_uc_tx_hdr)
#define DP_IPA_UC_WLAN_TX_VLAN_HDR_LEN sizeof(struct dp_ipa_uc_tx_vlan_hdr)
#define DP_IPA_UC_WLAN_RX_HDR_LEN      sizeof(struct dp_ipa_uc_rx_hdr)
/* 28 <bytes of rx_msdu_end_tlv> + 16 <bytes of attn tlv> +
 * 52 <bytes of rx_mpdu_start_tlv> + <L2 Header>,
 * i.e. 96 bytes of RX TLVs plus a 14-byte Ethernet II header (110 total),
 * or plus an 18-byte 802.1Q VLAN Ethernet header (114 total).
 */
#define DP_IPA_UC_WLAN_RX_HDR_LEN_AST  110
#define DP_IPA_UC_WLAN_RX_HDR_LEN_AST_VLAN 114
#define DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET	0

#define DP_IPA_HDL_INVALID	0xFF
#define DP_IPA_HDL_FIRST	0
#define DP_IPA_HDL_SECOND	1
#define DP_IPA_HDL_THIRD	2
/**
 * wlan_ipa_get_hdl() - Get ipa handle from IPA component
 * @psoc: control psoc object
 * @pdev_id: pdev id
 *
 * IPA component will return the IPA handle based on pdev_id
 *
 * Return: IPA handle
 */
qdf_ipa_wdi_hdl_t wlan_ipa_get_hdl(void *psoc, uint8_t pdev_id);
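
/*
 * Illustrative caller sketch (not part of this header's contract): fetch the
 * IPA handle for a pdev and validate it against DP_IPA_HDL_INVALID before
 * passing it to the dp_ipa_* APIs below. 'psoc' and 'pdev_id' are assumed to
 * be provided by the caller.
 *
 *	qdf_ipa_wdi_hdl_t hdl = wlan_ipa_get_hdl(psoc, pdev_id);
 *
 *	if (hdl == DP_IPA_HDL_INVALID)
 *		return QDF_STATUS_E_INVAL;
 */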

/**
 * dp_ipa_get_resource() - Client request resource information
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 *  IPA client will request IPA UC related resource information
 *  Resource information will be distributed to IPA module
 *  All of the required resources should be pre-allocated
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

/**
 * dp_ipa_set_doorbell_paddr() - Set doorbell register physical address to SRNG
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
 * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_soc_t *soc_hdl,
				     uint8_t pdev_id);

/**
 * dp_ipa_iounmap_doorbell_vaddr() - unmap ipa RX db vaddr
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_iounmap_doorbell_vaddr(struct cdp_soc_t *soc_hdl,
					 uint8_t pdev_id);

/**
 * dp_ipa_op_response() - Handle OP command response from firmware
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @op_msg: op response message from firmware
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_op_response(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			      uint8_t *op_msg);

/**
 * dp_ipa_register_op_cb() - Register OP handler function
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @op_cb: handler function pointer
 * @usr_ctxt: user context passed back to handler function
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_register_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				 ipa_uc_op_cb_type op_cb, void *usr_ctxt);

/**
 * dp_ipa_deregister_op_cb() - Deregister OP handler function
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Return: none
 */
void dp_ipa_deregister_op_cb(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

/**
 * dp_ipa_get_stat() - Get firmware wdi status
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_get_stat(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

/**
 * dp_tx_send_ipa_data_frame() - send IPA data frame
 * @soc_hdl: datapath soc handle
 * @vdev_id: virtual device/interface id
 * @skb: skb to transmit
 *
 * Return: NULL on success, or the skb back to the caller on failure
 */
qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
				     qdf_nbuf_t skb);
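
/*
 * Illustrative caller sketch, assuming the NULL-on-success convention
 * documented above: if the nbuf is handed back, the caller still owns it and
 * is responsible for freeing it.
 *
 *	qdf_nbuf_t ret = dp_tx_send_ipa_data_frame(soc_hdl, vdev_id, skb);
 *
 *	if (ret) {
 *		qdf_nbuf_free(ret);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 */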

/**
 * dp_ipa_enable_autonomy() - Enable autonomy RX path
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Route all RX packets to the IPA REO ring by programming the
 * Destination_Ring_Ctrl_IX_0 REO register to point at the IPA REO ring.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

/**
 * dp_ipa_disable_autonomy() - Disable autonomy RX path
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 *
 * Disable RX packet routing to the IPA REO ring by programming the
 * Destination_Ring_Ctrl_IX_0 REO register accordingly.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) || \
	defined(CONFIG_IPA_WDI_UNIFIED_API)
/**
 * dp_ipa_setup() - Setup and connect IPA pipes
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @ipa_i2w_cb: IPA to WLAN callback
 * @ipa_w2i_cb: WLAN to IPA callback
 * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
 * @ipa_desc_size: IPA descriptor size
 * @ipa_priv: handle to the HTT instance
 * @is_rm_enabled: Is IPA RM enabled or not
 * @tx_pipe_handle: pointer to Tx pipe handle
 * @rx_pipe_handle: pointer to Rx pipe handle
 * @is_smmu_enabled: Is SMMU enabled or not
 * @sys_in: parameters to setup sys pipe in mcc mode
 * @over_gsi: flag indicating whether the WDI connection is over GSI
 * @hdl: IPA handle
 * @id: IPA instance id
 * @ipa_ast_notify_cb: IPA to WLAN callback for ast create and update
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle,
			bool is_smmu_enabled,
			qdf_ipa_sys_connect_params_t *sys_in, bool over_gsi,
			qdf_ipa_wdi_hdl_t hdl, qdf_ipa_wdi_hdl_t id,
			void *ipa_ast_notify_cb);
#else /* CONFIG_IPA_WDI_UNIFIED_API */
/**
 * dp_ipa_setup() - Setup and connect IPA pipes
 * @soc_hdl: data path soc handle
 * @pdev_id: device instance id
 * @ipa_i2w_cb: IPA to WLAN callback
 * @ipa_w2i_cb: WLAN to IPA callback
 * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
 * @ipa_desc_size: IPA descriptor size
 * @ipa_priv: handle to the HTT instance
 * @is_rm_enabled: Is IPA RM enabled or not
 * @tx_pipe_handle: pointer to Tx pipe handle
 * @rx_pipe_handle: pointer to Rx pipe handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			void *ipa_i2w_cb, void *ipa_w2i_cb,
			void *ipa_wdi_meter_notifier_cb,
			uint32_t ipa_desc_size, void *ipa_priv,
			bool is_rm_enabled, uint32_t *tx_pipe_handle,
			uint32_t *rx_pipe_handle);
#endif /* CONFIG_IPA_WDI_UNIFIED_API */

/**
 * dp_ipa_cleanup() - Disconnect IPA pipes
 * @soc_hdl: dp soc handle
 * @pdev_id: dp pdev id
 * @tx_pipe_handle: Tx pipe handle
 * @rx_pipe_handle: Rx pipe handle
 * @hdl: IPA handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_cleanup(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			  uint32_t tx_pipe_handle, uint32_t rx_pipe_handle,
			  qdf_ipa_wdi_hdl_t hdl);

/**
 * dp_ipa_setup_iface() - Setup IPA header and register interface
 * @ifname: Interface name
 * @mac_addr: Interface MAC address
 * @prod_client: IPA prod client type
 * @cons_client: IPA cons client type
 * @session_id: Session ID
 * @is_ipv6_enabled: Is IPV6 enabled or not
 * @hdl: IPA handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
			      qdf_ipa_client_type_t prod_client,
			      qdf_ipa_client_type_t cons_client,
			      uint8_t session_id, bool is_ipv6_enabled,
			      qdf_ipa_wdi_hdl_t hdl);

/**
 * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
 * @ifname: Interface name
 * @is_ipv6_enabled: Is IPV6 enabled or not
 * @hdl: IPA handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled,
				qdf_ipa_wdi_hdl_t hdl);

/**
 * dp_ipa_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
 * @soc_hdl: handle to the soc
 * @pdev_id: pdev id number, to get the handle
 * @hdl: IPA handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
			       qdf_ipa_wdi_hdl_t hdl);
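
/*
 * Typical offload bring-up order, summarized here as a hedged reading of the
 * APIs above rather than a normative contract (the IPA/HDD glue code is the
 * authoritative caller):
 *
 *	dp_ipa_get_resource()       - publish ring/buffer resources to IPA
 *	dp_ipa_setup()              - set up and connect the WDI pipes
 *	dp_ipa_set_doorbell_paddr() - point SRNG head pointers at IPA doorbells
 *	dp_ipa_setup_iface()        - register the interface headers
 *	dp_ipa_enable_pipes()       - enable and resume Tx/Rx traffic
 */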

/**
 * dp_ipa_disable_pipes() - Suspend traffic and disable Tx/Rx pipes
 * @soc_hdl: handle to the soc
 * @pdev_id: pdev id number, to get the handle
 * @hdl: IPA handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
				qdf_ipa_wdi_hdl_t hdl);

/**
 * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
 * @client: Client type
 * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
 * @hdl: IPA handle
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps,
				 qdf_ipa_wdi_hdl_t hdl);
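
/*
 * Illustrative only: requesting IPA bandwidth for a client. The 'client'
 * argument is typically an IPA client type value chosen by the caller, 'hdl'
 * comes from wlan_ipa_get_hdl(), and 1200 Mbps is an arbitrary example value.
 *
 *	dp_ipa_set_perf_level(client, 1200, hdl);
 */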
#ifdef IPA_OPT_WIFI_DP
QDF_STATUS dp_ipa_rx_super_rule_setup(struct cdp_soc_t *soc_hdl,
				      void *flt_params);
int dp_ipa_pcie_link_up(struct cdp_soc_t *soc_hdl);
void dp_ipa_pcie_link_down(struct cdp_soc_t *soc_hdl);
#endif

#ifdef QCA_SUPPORT_WDS_EXTENDED
/**
 * dp_ipa_rx_wdsext_iface() - Forward RX exception packets to wdsext interface
 * @soc_hdl: data path SoC handle
 * @peer_id: Peer ID to get respective peer
 * @skb: socket buffer
 *
 * Return: true if the packet was forwarded to the wdsext interface,
 *	   false otherwise
 */
bool dp_ipa_rx_wdsext_iface(struct cdp_soc_t *soc_hdl, uint8_t peer_id,
			    qdf_nbuf_t skb);
#endif

/**
 * dp_ipa_rx_intrabss_fwd() - Perform intra-bss fwd for IPA RX path
 *
 * @soc_hdl: data path soc handle
 * @vdev_id: virtual device/interface id
 * @nbuf: pointer to skb of ethernet packet received from IPA RX path
 * @fwd_success: pointer to indicate if skb succeeded in intra-bss TX
 *
 * This function performs intra-bss forwarding for WDI 3.0 IPA RX path.
 *
 * Return: true if packet is intra-bss fwd-ed and no need to pass to
 *	   network stack. false if packet needs to be passed to network stack.
 */
bool dp_ipa_rx_intrabss_fwd(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			    qdf_nbuf_t nbuf, bool *fwd_success);
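
/*
 * Illustrative caller pattern, following the return semantics above.
 * deliver_to_stack() is a hypothetical stand-in for however the caller hands
 * an nbuf to the network stack, and dp_update_intrabss_drop_stats() is only a
 * placeholder for the caller's own accounting of failed intra-BSS forwards;
 * neither is an API declared in this header.
 *
 *	bool fwd_success;
 *
 *	if (!dp_ipa_rx_intrabss_fwd(soc_hdl, vdev_id, nbuf, &fwd_success))
 *		deliver_to_stack(nbuf);
 *	else if (!fwd_success)
 *		dp_update_intrabss_drop_stats(vdev_id);
 */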
int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev);
int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev);

/**
 * dp_ipa_ring_resource_setup() - setup IPA ring resources
 * @soc: data path SoC handle
 * @pdev: data path pdev handle
 *
 * Return: status
 */
int dp_ipa_ring_resource_setup(struct dp_soc *soc,
			       struct dp_pdev *pdev);

bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
			 uint32_t *remap1, uint32_t *remap2);
bool dp_ipa_is_mdm_platform(void);

/**
 * dp_ipa_handle_rx_reo_reinject() - Handle RX REO reinject skb buffer
 * @soc: soc
 * @nbuf: skb
 *
 * Return: nbuf on success, NULL otherwise
 */
qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf);

QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
					     qdf_nbuf_t nbuf,
					     uint32_t size,
					     bool create,
					     const char *func,
					     uint32_t line);
/**
 * dp_ipa_tx_buf_smmu_mapping() - Create SMMU mappings for IPA
 *				  allocated TX buffers
 * @soc_hdl: handle to the soc
 * @pdev_id: pdev id number, to get the handle
 * @func: caller function
 * @line: line number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_tx_buf_smmu_mapping(struct cdp_soc_t *soc_hdl,
				      uint8_t pdev_id, const char *func,
				      uint32_t line);

/**
 * dp_ipa_tx_buf_smmu_unmapping() - Release SMMU mappings for IPA
 *				    allocated TX buffers
 * @soc_hdl: handle to the soc
 * @pdev_id: pdev id number, to get the handle
 * @func: caller function
 * @line: line number
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(struct cdp_soc_t *soc_hdl,
					uint8_t pdev_id, const char *func,
					uint32_t line);
QDF_STATUS dp_ipa_rx_buf_pool_smmu_mapping(struct cdp_soc_t *soc_hdl,
					   uint8_t pdev_id,
					   bool create,
					   const char *func,
					   uint32_t line);
QDF_STATUS dp_ipa_set_smmu_mapped(struct cdp_soc_t *soc, int val);
int dp_ipa_get_smmu_mapped(struct cdp_soc_t *soc);

#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
static inline void
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
{
	if (soc->ipa_rx_buf_map_lock_initialized)
		qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock);
}

static inline void
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
{
	if (soc->ipa_rx_buf_map_lock_initialized)
		qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock);
}

static inline void
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
				uint32_t reo_ring_num)
{
	if (!soc->ipa_reo_ctx_lock_required[reo_ring_num])
		return;

	qdf_spin_lock_bh(&soc->ipa_rx_buf_map_lock);
}

static inline void
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
				  uint32_t reo_ring_num)
{
	if (!soc->ipa_reo_ctx_lock_required[reo_ring_num])
		return;

	qdf_spin_unlock_bh(&soc->ipa_rx_buf_map_lock);
}
#else

static inline void
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
{
}

static inline void
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
{
}

static inline void
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
				uint32_t reo_ring_num)
{
}

static inline void
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
				  uint32_t reo_ring_num)
{
}
#endif
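
/*
 * Illustrative usage of the mapping lock helpers above (a sketch only; the
 * RX buffer replenish/unmap paths in the datapath .c files are the
 * authoritative users). 'nbuf' and 'buf_size' are assumed to be provided by
 * the caller.
 *
 *	dp_ipa_rx_buf_smmu_mapping_lock(soc);
 *	dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size, true,
 *					  __func__, __LINE__);
 *	dp_ipa_rx_buf_smmu_mapping_unlock(soc);
 */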

#ifdef IPA_WDS_EASYMESH_FEATURE
/**
 * dp_ipa_ast_create() - Create/update AST entry in AST table
 *			 for learning/roaming packets from IPA
 * @soc_hdl: data path soc handle
 * @data: Structure used for updating the AST table
 *
 * Create/update AST entry in AST table for learning/roaming packets from IPA
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
			     qdf_ipa_ast_info_type_t *data);

/**
 * dp_ipa_ast_notify_cb() - Provide ast notify cb to IPA
 * @pipe_in: WDI conn pipe in params
 * @ipa_ast_notify_cb: ipa ast notify cb
 *
 * Return: None
 */
static inline void
dp_ipa_ast_notify_cb(qdf_ipa_wdi_conn_in_params_t *pipe_in,
		     void *ipa_ast_notify_cb)
{
	QDF_IPA_WDI_CONN_IN_PARAMS_AST_NOTIFY(pipe_in) = ipa_ast_notify_cb;
}
#else
static inline void
dp_ipa_ast_notify_cb(qdf_ipa_wdi_conn_in_params_t *pipe_in,
		     void *ipa_ast_notify_cb)
{
}
#endif

#ifdef IPA_OPT_WIFI_DP
static inline void dp_ipa_opt_dp_ixo_remap(uint8_t *ix0_map)
{
	ix0_map[0] = REO_REMAP_SW1;
	ix0_map[1] = REO_REMAP_SW1;
	ix0_map[2] = REO_REMAP_SW2;
	ix0_map[3] = REO_REMAP_SW3;
	ix0_map[4] = REO_REMAP_SW4;
	ix0_map[5] = REO_REMAP_RELEASE;
	ix0_map[6] = REO_REMAP_FW;
	ix0_map[7] = REO_REMAP_FW;
}
#else
static inline void dp_ipa_opt_dp_ixo_remap(uint8_t *ix0_map)
{
}
#endif
#ifdef QCA_ENHANCED_STATS_SUPPORT
/**
 * dp_ipa_txrx_get_peer_stats() - fetch peer stats
 * @soc: soc handle
 * @vdev_id: id of vdev handle
 * @peer_mac: peer mac address
 * @peer_stats: buffer to hold peer stats
 *
 * Return: status success/failure
 */
QDF_STATUS dp_ipa_txrx_get_peer_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
				      uint8_t *peer_mac,
				      struct cdp_peer_stats *peer_stats);
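
/*
 * Illustrative fetch of peer stats (sketch only; 'peer_mac' and 'vdev_id'
 * are assumed to be known to the caller):
 *
 *	struct cdp_peer_stats *stats = qdf_mem_malloc(sizeof(*stats));
 *
 *	if (stats &&
 *	    dp_ipa_txrx_get_peer_stats(soc, vdev_id, peer_mac, stats) ==
 *	    QDF_STATUS_SUCCESS) {
 *		... consume stats ...
 *	}
 *	qdf_mem_free(stats);
 */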

/**
 * dp_ipa_txrx_get_vdev_stats() - fetch vdev stats
 * @soc_hdl: soc handle
 * @vdev_id: id of vdev handle
 * @buf: buffer to hold vdev stats
 * @is_aggregate: for aggregation
 *
 * Return: int
 */
int dp_ipa_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			       void *buf, bool is_aggregate);

/**
 * dp_ipa_txrx_get_pdev_stats() - fetch pdev stats
 * @soc: DP soc handle
 * @pdev_id: id of DP pdev handle
 * @pdev_stats: buffer to hold pdev stats
 *
 * Return: status success/failure
 */
QDF_STATUS dp_ipa_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
				      struct cdp_pdev_stats *pdev_stats);

/**
 * dp_ipa_update_peer_rx_stats() - update peer rx stats
 * @soc: soc handle
 * @vdev_id: vdev id
 * @peer_mac: Peer Mac Address
 * @nbuf: data nbuf
 *
 * Return: status success/failure
 */
QDF_STATUS dp_ipa_update_peer_rx_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
				       uint8_t *peer_mac, qdf_nbuf_t nbuf);
#endif
/**
 * dp_ipa_get_wdi_version() - Get WDI version
 * @soc_hdl: data path soc handle
 * @wdi_ver: Out parameter for wdi version
 *
 * Get WDI version based on soc arch
 *
 * Return: None
 */
void dp_ipa_get_wdi_version(struct cdp_soc_t *soc_hdl, uint8_t *wdi_ver);

/**
 * dp_ipa_is_ring_ipa_tx() - Check if the TX ring is used by IPA
 *
 * @soc: DP SoC
 * @ring_id: TX ring id
 *
 * Return: true if the TX ring is used by IPA, false otherwise
 */
bool dp_ipa_is_ring_ipa_tx(struct dp_soc *soc, uint8_t ring_id);
#else
static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
{
	return QDF_STATUS_SUCCESS;
}

static inline int dp_ipa_ring_resource_setup(struct dp_soc *soc,
					     struct dp_pdev *pdev)
{
	return 0;
}

static inline QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
							   qdf_nbuf_t nbuf,
							   uint32_t size,
							   bool create,
							   const char *func,
							   uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
dp_ipa_rx_buf_smmu_mapping_lock(struct dp_soc *soc)
{
}

static inline void
dp_ipa_rx_buf_smmu_mapping_unlock(struct dp_soc *soc)
{
}

static inline void
dp_ipa_reo_ctx_buf_mapping_lock(struct dp_soc *soc,
				uint32_t reo_ring_num)
{
}

static inline void
dp_ipa_reo_ctx_buf_mapping_unlock(struct dp_soc *soc,
				  uint32_t reo_ring_num)
{
}

static inline qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc,
						       qdf_nbuf_t nbuf)
{
	return nbuf;
}

static inline QDF_STATUS dp_ipa_tx_buf_smmu_mapping(struct cdp_soc_t *soc_hdl,
						    uint8_t pdev_id,
						    const char *func,
						    uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(struct cdp_soc_t *soc_hdl,
						      uint8_t pdev_id,
						      const char *func,
						      uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_ipa_rx_buf_pool_smmu_mapping(
						      struct cdp_soc_t *soc_hdl,
						      uint8_t pdev_id,
						      bool create,
						      const char *func,
						      uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_ipa_set_smmu_mapped(struct cdp_soc_t *soc, int val)
{
	return QDF_STATUS_SUCCESS;
}

static inline int dp_ipa_get_smmu_mapped(struct cdp_soc_t *soc)
{
	return QDF_STATUS_SUCCESS;
}

#ifdef IPA_WDS_EASYMESH_FEATURE
static inline QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
					   qdf_ipa_ast_info_type_t *data)
{
	return QDF_STATUS_SUCCESS;
}
#endif
static inline void dp_ipa_get_wdi_version(struct cdp_soc_t *soc_hdl,
					  uint8_t *wdi_ver)
{
}

static inline bool
dp_ipa_is_ring_ipa_tx(struct dp_soc *soc, uint8_t ring_id)
{
	return false;
}
#endif
#endif /* _DP_IPA_H_ */