xref: /wlan-driver/qca-wifi-host-cmn/dp/wifi3.0/be/dp_be.h (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_BE_H
20 #define __DP_BE_H
21 
22 #include <dp_types.h>
23 #include <hal_be_tx.h>
24 #ifdef WLAN_MLO_MULTI_CHIP
25 #include "mlo/dp_mlo.h"
26 #else
27 #include <dp_peer.h>
28 #endif
29 #ifdef WIFI_MONITOR_SUPPORT
30 #include <dp_mon.h>
31 #endif
32 
33 enum CMEM_MEM_CLIENTS {
34 	COOKIE_CONVERSION,
35 	FISA_FST,
36 };
37 
38 /* maximum number of entries in one page of secondary page table */
39 #define DP_CC_SPT_PAGE_MAX_ENTRIES 512
40 
41 /* mask for entry offset within one page of secondary page table */
42 #define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)
43 
44 /* maximum number of entries in primary page table */
45 #define DP_CC_PPT_MAX_ENTRIES \
46 	DP_CC_PPT_MEM_SIZE / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED
47 
48 /* cookie conversion required CMEM offset from CMEM pool */
49 #define DP_CC_MEM_OFFSET_IN_CMEM 0
50 
51 /* cookie conversion primary page table size 4K */
52 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
53 #define DP_CC_PPT_MEM_SIZE 4096
54 #else
55 #define DP_CC_PPT_MEM_SIZE 8192
56 #endif
57 
58 /* FST required CMEM offset from CMEM pool */
59 #define DP_FST_MEM_OFFSET_IN_CMEM \
60 	(DP_CC_MEM_OFFSET_IN_CMEM + DP_CC_PPT_MEM_SIZE)
61 
62 /* lower 9 bits in Desc ID for offset in page of SPT */
63 #define DP_CC_DESC_ID_SPT_VA_OS_SHIFT 0
64 
65 #define DP_CC_DESC_ID_SPT_VA_OS_MASK 0x1FF
66 
67 #define DP_CC_DESC_ID_SPT_VA_OS_LSB 0
68 
69 #define DP_CC_DESC_ID_SPT_VA_OS_MSB 8
70 
71 /* higher 11 bits in Desc ID for offset in CMEM of PPT */
72 #define DP_CC_DESC_ID_PPT_PAGE_OS_LSB 9
73 
74 #define DP_CC_DESC_ID_PPT_PAGE_OS_MSB 19
75 
76 #define DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT 9
77 
78 #define DP_CC_DESC_ID_PPT_PAGE_OS_MASK 0xFFE00
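
/*
 * Illustrative layout of the cookie conversion Desc ID built from the
 * fields above: bits 8..0 index the entry within one SPT page and bits
 * 19..9 select the PPT page in CMEM.
 *
 *	 19               9 8                0
 *	+-------------------+------------------+
 *	|  PPT page offset  | SPT entry offset |
 *	+-------------------+------------------+
 */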
79 
80 /*
81  * page 4K unaligned case, single SPT page physical address
82  * needs 8 bytes in PPT
83  */
84 #define DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED 8
85 /*
86  * page 4K aligned case, single SPT page physical address
87  * needs 4 bytes in PPT
88  */
89 #define DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED 4
90 
91 /* 4K aligned case, number of bits HW appends to one PPT entry value */
92 #define DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED 12
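
/*
 * Illustrative sketch (not part of the driver code; page_pa below is a
 * hypothetical SPT page physical address): a 4K-aligned page PA has its
 * lower 12 bits equal to zero, so SW stores only the upper bits in a 4-byte
 * PPT entry and HW rebuilds the full address by appending
 * DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED zero bits:
 *
 *	uint32_t ppt_entry = (uint32_t)(page_pa >> 12);          // written by SW
 *	qdf_dma_addr_t hw_pa = (qdf_dma_addr_t)ppt_entry << 12;  // as read by HW
 *
 * A page PA that is not 4K aligned must be stored in full, which is why the
 * unaligned case needs DP_CC_PPT_ENTRY_SIZE_4K_UNALIGNED (8) bytes per entry.
 */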
93 
94 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
95 /* WBM2SW ring id for rx release */
96 #define WBM2SW_REL_ERR_RING_NUM 3
97 #else
98 /* WBM2SW ring id for rx release */
99 #define WBM2SW_REL_ERR_RING_NUM 5
100 #endif
101 
102 #ifdef WLAN_SUPPORT_PPEDS
103 #define DP_PPEDS_STAMODE_ASTIDX_MAP_REG_IDX 1
104 /* The MAX PPE PRI2TID */
105 #define DP_TX_INT_PRI2TID_MAX 15
106 
107 /* size of CMEM needed for a ppeds tx desc pool */
108 #define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE \
109 	((WLAN_CFG_NUM_PPEDS_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
110 	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
111 
112 /* Offset of ppeds tx descriptor pool */
113 #define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
114 
115 #define PEER_ROUTING_USE_PPE 1
116 #define PEER_ROUTING_ENABLED 1
117 #define DP_PPE_INTR_STRNG_LEN 32
118 #define DP_PPE_INTR_MAX 3
119 
120 #else
121 #define DP_TX_PPEDS_DESC_CMEM_OFFSET 0
122 #define DP_TX_PPEDS_DESC_POOL_CMEM_SIZE 0
123 
124 #define DP_PPE_INTR_STRNG_LEN 0
125 #define DP_PPE_INTR_MAX 0
126 #endif
127 
128 /* tx descriptors are programmed at the start of the CMEM region */
129 #define DP_TX_DESC_CMEM_OFFSET \
130 	(DP_TX_PPEDS_DESC_CMEM_OFFSET + DP_TX_PPEDS_DESC_POOL_CMEM_SIZE)
131 
132 /* size of CMEM needed for a tx desc pool*/
133 #define DP_TX_DESC_POOL_CMEM_SIZE \
134 	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
135 	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
136 
137 #ifndef QCA_SUPPORT_DP_GLOBAL_CTX
138 /* Offset of rx descriptor pool */
139 #define DP_RX_DESC_CMEM_OFFSET \
140 	DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE)
141 
142 #else
143 /* tx special descriptors are programmed after the tx desc CMEM region */
144 #define DP_TX_SPCL_DESC_CMEM_OFFSET \
145 	DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE)
146 
147 /* size of CMEM needed for a tx special desc pool*/
148 #define DP_TX_SPCL_DESC_POOL_CMEM_SIZE \
149 	((WLAN_CFG_NUM_TX_SPL_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
150 	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
151 
152 /* Offset of rx descriptor pool */
153 #define DP_RX_DESC_CMEM_OFFSET \
154 	DP_TX_SPCL_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * \
155 	DP_TX_SPCL_DESC_POOL_CMEM_SIZE)
156 #endif
157 
158 /* size of CMEM needed for a rx desc pool */
159 #define DP_RX_DESC_POOL_CMEM_SIZE \
160 	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
161 	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
162 
163 /* get ppt_id from CMEM_OFFSET */
164 #define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
165 	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
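
/*
 * Worked example (illustrative only): each PPT entry is
 * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED (4) bytes, so a descriptor pool whose
 * cookie conversion area begins 0x40 bytes into the CC CMEM region starts
 * at primary page table index DP_CMEM_OFFSET_TO_PPT_ID(0x40) = 0x40 / 4 = 16,
 * i.e. its first SPT page physical address occupies PPT entry 16.
 */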
166 
167 /**
168  * struct dp_spt_page_desc - secondary page table page descriptors
169  * @page_v_addr: page virtual address
170  * @page_p_addr: page physical address
171  * @ppt_index: entry index in primary page table where this page physical
172  *		address is stored
173  */
174 struct dp_spt_page_desc {
175 	uint8_t *page_v_addr;
176 	qdf_dma_addr_t page_p_addr;
177 	uint32_t ppt_index;
178 };
179 
180 /**
181  * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
182  * @cmem_offset: CMEM offset from base address for primary page table setup
183  * @total_page_num: total DDR page allocated
184  * @page_desc_freelist: available page Desc list
185  * @page_desc_base: page Desc buffer base address.
186  * @page_pool: DDR pages pool
187  * @cc_lock: locks for page acquiring/free
188  */
189 struct dp_hw_cookie_conversion_t {
190 	uint32_t cmem_offset;
191 	uint32_t total_page_num;
192 	struct dp_spt_page_desc *page_desc_base;
193 	struct qdf_mem_multi_page_t page_pool;
194 	qdf_spinlock_t cc_lock;
195 };
196 
197 /**
198  * struct dp_spt_page_desc_list - container of SPT page desc list info
199  * @spt_page_list_head: head of SPT page descriptor list
200  * @spt_page_list_tail: tail of SPT page descriptor list
201  * @num_spt_pages: number of SPT page descriptors allocated
202  */
203 struct dp_spt_page_desc_list {
204 	struct dp_spt_page_desc *spt_page_list_head;
205 	struct dp_spt_page_desc *spt_page_list_tail;
206 	uint16_t num_spt_pages;
207 };
208 
209 /* HW reads 8 bytes for each VA */
210 #define DP_CC_HW_READ_BYTES 8
211 #define DP_CC_SPT_PAGE_UPDATE_VA(_page_base_va, _index, _desc_va) \
212 	{ *((uintptr_t *)((_page_base_va) + (_index) * DP_CC_HW_READ_BYTES)) \
213 	= (uintptr_t)(_desc_va); }
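
/*
 * Usage sketch (ppt_index, idx and tx_desc are hypothetical variables):
 * publish a SW descriptor's virtual address into SPT page slot @idx so that
 * HW cookie conversion can hand it back later; every slot is
 * DP_CC_HW_READ_BYTES (8) bytes wide even when only a 32-bit VA is stored.
 *
 *	struct dp_spt_page_desc *page = &be_soc->page_desc_base[ppt_index];
 *
 *	DP_CC_SPT_PAGE_UPDATE_VA(page->page_v_addr, idx, tx_desc);
 */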
214 
215 /**
216  * struct dp_tx_bank_profile - DP wrapper for TCL banks
217  * @is_configured: flag indicating if this bank is configured
218  * @ref_count: ref count indicating number of users of the bank
219  * @bank_config: HAL TX bank configuration
220  */
221 struct dp_tx_bank_profile {
222 	uint8_t is_configured;
223 	qdf_atomic_t  ref_count;
224 	union hal_tx_bank_config bank_config;
225 };
226 
227 #ifdef WLAN_SUPPORT_PPEDS
228 /**
229  * struct dp_ppe_vp_tbl_entry - PPE Virtual table entry
230  * @is_configured: Boolean that the entry is configured.
231  */
232 struct dp_ppe_vp_tbl_entry {
233 	bool is_configured;
234 };
235 
236 /**
237  * struct dp_ppe_vp_search_idx_tbl_entry - PPE Virtual search table entry
238  * @is_configured: Boolean that the entry is configured.
239  */
240 struct dp_ppe_vp_search_idx_tbl_entry {
241 	bool is_configured;
242 };
243 
244 /**
245  * struct dp_ppe_vp_profile - PPE direct switch profile per vdev
246  * @is_configured: Boolean that the entry is configured.
247  * @vp_num: Virtual port number
248  * @ppe_vp_num_idx: Index to the PPE VP table entry
249  * @search_idx_reg_num: Address search Index register number
250  * @drop_prec_enable: Drop precedence enable
251  * @to_fw: To FW exception enable/disable.
252  * @use_ppe_int_pri: Use PPE INT_PRI to TID mapping table
253  * @vdev_id: Vdev ID
254  */
255 struct dp_ppe_vp_profile {
256 	bool is_configured;
257 	uint8_t vp_num;
258 	uint8_t ppe_vp_num_idx;
259 	uint8_t search_idx_reg_num;
260 	uint8_t drop_prec_enable;
261 	uint8_t to_fw;
262 	uint8_t use_ppe_int_pri;
263 	uint8_t vdev_id;
264 };
265 
266 /**
267  * struct dp_ppeds_tx_desc_pool_s - PPEDS Tx Descriptor Pool
268  * @elem_size: Size of each descriptor
269  * @hot_list_len: Length of hotlist chain
270  * @num_allocated: Number of used descriptors
271  * @freelist: Chain of free descriptors
272  * @hotlist: Chain of descriptors with attached nbufs
273  * @desc_pages: multiple page allocation information for actual descriptors
274  * @elem_count: Number of descriptors in the pool
275  * @num_free: Number of free descriptors
276  * @lock: Lock for descriptor allocation/free from/to the pool
277  */
278 struct dp_ppeds_tx_desc_pool_s {
279 	uint16_t elem_size;
280 	uint32_t num_allocated;
281 	uint32_t hot_list_len;
282 	struct dp_tx_desc_s *freelist;
283 	struct dp_tx_desc_s *hotlist;
284 	struct qdf_mem_multi_page_t desc_pages;
285 	uint16_t elem_count;
286 	uint32_t num_free;
287 	qdf_spinlock_t lock;
288 };
289 #endif
290 
291 /**
292  * struct dp_ppeds_napi - napi parameters for ppe ds
293  * @napi: napi structure to register with napi infra
294  * @ndev: net_dev structure
295  */
296 struct dp_ppeds_napi {
297 	struct napi_struct napi;
298 	struct net_device ndev;
299 };
300 
301 /*
302  * NB: intentionally not using kernel-doc comment because the kernel-doc
303  *     script does not handle the TAILQ_HEAD macro
304  * struct dp_soc_be - Extended DP soc for BE targets
305  * @soc: dp soc structure
306  * @num_bank_profiles: num TX bank profiles
307  * @tx_bank_lock: lock for @bank_profiles
308  * @bank_profiles: bank profiles for various TX banks
309  * @page_desc_base: base address of the SPT page descriptor array
310  * @cc_cmem_base: cmem offset reserved for CC
311  * @tx_cc_ctx: Cookie conversion context for tx desc pools
312  * @rx_cc_ctx: Cookie conversion context for rx desc pools
313  * @ppeds_int_mode_enabled: PPE DS interrupt mode enabled
314  * @ppeds_stopped: flag to indicate PPE DS processing is stopped
315  * @reo2ppe_ring: REO2PPE ring
316  * @ppe2tcl_ring: PPE2TCL ring
317  * @ppeds_wbm_release_ring: PPE DS WBM release ring
318  * @ppe_vp_tbl: PPE VP table
319  * @ppe_vp_search_idx_tbl: PPE VP search idx table
320  * @ppeds_tx_cc_ctx: Cookie conversion context for ppeds tx desc pool
321  * @ppeds_tx_desc: PPEDS tx desc pool
322  * @ppeds_napi_ctxt: napi context for PPE DS
323  * @ppeds_handle: PPEDS soc instance handle
324  * @dp_ppeds_txdesc_hotlist_len: PPEDS tx desc hotlist length
325  * @ppe_vp_tbl_lock: PPE VP table lock
326  * @num_ppe_vp_entries: Number of PPE VP entries
327  * @num_ppe_vp_search_idx_entries: PPEDS VP search idx entries
328  * @irq_name: PPEDS VP irq names
329  * @ppeds_stats: PPEDS stats
330  * @mlo_enabled: Flag to indicate MLO is enabled or not
331  * @mlo_chip_id: MLO chip_id
332  * @ml_ctxt: pointer to global ml_context
333  * @delta_tqm: delta_tqm
334  * @mlo_tstamp_offset: mlo timestamp offset
335  * @mld_peer_hash_lock: lock to protect mld_peer_hash
336  * @mld_peer_hash: peer hash table for ML peers
337  * @mlo_dev_list: list of MLO device context
338  * @mlo_dev_list_lock: lock to protect MLO device ctxt
339  * @ipa_bank_id: TCL bank id used by IPA
340  */
341 struct dp_soc_be {
342 	struct dp_soc soc;
343 	uint8_t num_bank_profiles;
344 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
345 	qdf_mutex_t tx_bank_lock;
346 #else
347 	qdf_spinlock_t tx_bank_lock;
348 #endif
349 	struct dp_tx_bank_profile *bank_profiles;
350 	struct dp_spt_page_desc *page_desc_base;
351 	uint32_t cc_cmem_base;
352 	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
353 	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
354 #ifdef WLAN_SUPPORT_PPEDS
355 	uint8_t ppeds_int_mode_enabled:1,
356 		ppeds_stopped:1;
357 	struct dp_srng reo2ppe_ring;
358 	struct dp_srng ppe2tcl_ring;
359 	struct dp_srng ppeds_wbm_release_ring;
360 	struct dp_ppe_vp_tbl_entry *ppe_vp_tbl;
361 	struct dp_ppe_vp_search_idx_tbl_entry *ppe_vp_search_idx_tbl;
362 	struct dp_ppe_vp_profile *ppe_vp_profile;
363 	struct dp_hw_cookie_conversion_t ppeds_tx_cc_ctx;
364 	struct dp_ppeds_tx_desc_pool_s ppeds_tx_desc;
365 	struct dp_ppeds_napi ppeds_napi_ctxt;
366 	void *ppeds_handle;
367 	int dp_ppeds_txdesc_hotlist_len;
368 	qdf_mutex_t ppe_vp_tbl_lock;
369 	uint8_t num_ppe_vp_entries;
370 	uint8_t num_ppe_vp_search_idx_entries;
371 	uint8_t num_ppe_vp_profiles;
372 	char irq_name[DP_PPE_INTR_MAX][DP_PPE_INTR_STRNG_LEN];
373 	struct {
374 		struct {
375 			uint64_t desc_alloc_failed;
376 #ifdef GLOBAL_ASSERT_AVOIDANCE
377 			uint32_t tx_comp_buf_src;
378 			uint32_t tx_comp_desc_null;
379 			uint32_t tx_comp_invalid_flag;
380 #endif
381 		} tx;
382 	} ppeds_stats;
383 #endif
384 #ifdef WLAN_FEATURE_11BE_MLO
385 #ifdef WLAN_MLO_MULTI_CHIP
386 	uint8_t mlo_enabled;
387 	uint8_t mlo_chip_id;
388 	struct dp_mlo_ctxt *ml_ctxt;
389 	uint64_t delta_tqm;
390 	uint64_t mlo_tstamp_offset;
391 #else
392 	/* Protect mld peer hash table */
393 	DP_MUTEX_TYPE mld_peer_hash_lock;
394 	struct {
395 		uint32_t mask;
396 		uint32_t idx_bits;
397 
398 		TAILQ_HEAD(, dp_peer) * bins;
399 	} mld_peer_hash;
400 
401 	/* MLO device ctxt list */
402 	TAILQ_HEAD(, dp_mlo_dev_ctxt) mlo_dev_list;
403 	qdf_spinlock_t mlo_dev_list_lock;
404 #endif
405 #endif
406 #ifdef IPA_OFFLOAD
407 	int8_t ipa_bank_id;
408 #endif
409 };
410 
411 /* convert struct dp_soc_be pointer to struct dp_soc pointer */
412 #define DP_SOC_BE_GET_SOC(be_soc) ((struct dp_soc *)be_soc)
413 
414 /**
415  * struct dp_pdev_be - Extended DP pdev for BE targets
416  * @pdev: dp pdev structure
417  * @monitor_pdev_be: BE specific monitor object
418  * @mlo_link_id: MLO link id for PDEV
419  * @delta_tsf2: delta_tsf2
420  */
421 struct dp_pdev_be {
422 	struct dp_pdev pdev;
423 #ifdef WLAN_MLO_MULTI_CHIP
424 	uint8_t mlo_link_id;
425 	uint64_t delta_tsf2;
426 #endif
427 };
428 
429 /**
430  * struct dp_vdev_be - Extended DP vdev for BE targets
431  * @vdev: dp vdev structure
432  * @bank_id: bank_id to be used for TX
433  * @vdev_id_check_en: flag if HW vdev_id check is enabled for vdev
434  * @partner_vdev_list: partner list used for Intra-BSS
435  * @bridge_vdev_list: partner bridge vdev list
436  * @mlo_stats: structure to hold stats for mlo unmapped peers
437  * @mcast_primary: MLO Mcast primary vdev
438  * @mlo_dev_ctxt: MLO device context pointer
439  */
440 struct dp_vdev_be {
441 	struct dp_vdev vdev;
442 	int8_t bank_id;
443 	uint8_t vdev_id_check_en;
444 #ifdef WLAN_MLO_MULTI_CHIP
445 	struct cdp_vdev_stats mlo_stats;
446 #ifdef WLAN_FEATURE_11BE_MLO
447 #ifdef WLAN_MCAST_MLO
448 	bool mcast_primary;
449 #endif
450 #endif
451 #endif
452 #ifdef WLAN_FEATURE_11BE_MLO
453 	struct dp_mlo_dev_ctxt *mlo_dev_ctxt;
454 #endif /* WLAN_FEATURE_11BE_MLO */
455 };
456 
457 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_DP_MLO_DEV_CTX)
458 /**
459  * struct dp_mlo_dev_ctxt - Datapath MLO device context
460  *
461  * @ml_dev_list_elem: node in the ML dev list of Global MLO context
462  * @mld_mac_addr: MLO device MAC address
463  * @vdev_list: list of vdevs associated with this MLO connection
465  * @bridge_vdev: list of bridge vdevs associated with this MLO connection
466  * @is_bridge_vdev_present: flag to check if bridge vdev is present
467  * @vdev_list_lock: lock to protect vdev list
468  * @vdev_count: number of elements in the vdev list
469  * @seq_num: DP MLO multicast sequence number
470  * @ref_cnt: reference count
471  * @mod_refs: module reference count
472  * @ref_delete_pending: flag to monitor last ref delete
473  * @stats: structure to store vdev stats of removed MLO Link
474  */
475 struct dp_mlo_dev_ctxt {
476 	TAILQ_ENTRY(dp_mlo_dev_ctxt) ml_dev_list_elem;
477 	union dp_align_mac_addr mld_mac_addr;
478 #ifdef WLAN_MLO_MULTI_CHIP
479 	uint8_t vdev_list[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
480 	uint8_t bridge_vdev[WLAN_MAX_MLO_CHIPS][WLAN_MAX_MLO_LINKS_PER_SOC];
481 	bool is_bridge_vdev_present;
482 	qdf_spinlock_t vdev_list_lock;
483 	uint16_t vdev_count;
484 	uint16_t seq_num;
485 #endif
486 	qdf_atomic_t ref_cnt;
487 	qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
488 	uint8_t ref_delete_pending;
489 	struct dp_vdev_stats stats;
490 };
491 #endif /* WLAN_FEATURE_11BE_MLO */
492 
493 /**
494  * struct dp_peer_be - Extended DP peer for BE targets
495  * @peer: dp peer structure
496  * @priority_valid:
497  */
498 struct dp_peer_be {
499 	struct dp_peer peer;
500 #ifdef WLAN_SUPPORT_PPEDS
501 	uint8_t priority_valid;
502 #endif
503 };
504 
505 /**
506  * dp_get_soc_context_size_be() - get context size for target specific DP soc
507  *
508  * Return: value in bytes for BE specific soc structure
509  */
510 qdf_size_t dp_get_soc_context_size_be(void);
511 
512 /**
513  * dp_initialize_arch_ops_be() - initialize BE specific arch ops
514  * @arch_ops: arch ops pointer
515  *
516  * Return: none
517  */
518 void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops);
519 
520 /**
521  * dp_get_context_size_be() - get BE specific size for peer/vdev/pdev/soc
522  * @context_type: context type for which the size is needed
523  *
524  * Return: size in bytes for the context_type
525  */
526 qdf_size_t dp_get_context_size_be(enum dp_context_type context_type);
527 
528 /**
529  * dp_get_be_soc_from_dp_soc() - get dp_soc_be from dp_soc
530  * @soc: dp_soc pointer
531  *
532  * Return: dp_soc_be pointer
533  */
534 static inline struct dp_soc_be *dp_get_be_soc_from_dp_soc(struct dp_soc *soc)
535 {
536 	return (struct dp_soc_be *)soc;
537 }
538 
539 /**
540  * dp_mlo_iter_ptnr_soc() - iterate through mlo soc list and call the callback
541  * @be_soc: dp_soc_be pointer
542  * @func: Function to be called for each soc
543  * @arg: context to be passed to the callback
544  *
545  * Return: true if mlo is enabled, false if mlo is disabled
546  */
547 bool dp_mlo_iter_ptnr_soc(struct dp_soc_be *be_soc, dp_ptnr_soc_iter_func func,
548 			  void *arg);
549 
550 #ifdef WLAN_MLO_MULTI_CHIP
551 typedef struct dp_mlo_ctxt *dp_mld_peer_hash_obj_t;
552 typedef struct dp_mlo_ctxt *dp_mlo_dev_obj_t;
553 
554 /**
555  * dp_mlo_get_peer_hash_obj() - return the container struct of MLO hash table
556  * @soc: soc handle
557  *
558  * return: MLD peer hash object
559  */
560 static inline dp_mld_peer_hash_obj_t
561 dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
562 {
563 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
564 
565 	return be_soc->ml_ctxt;
566 }
567 
568 /**
569  * dp_get_mlo_dev_list_obj() - return the container struct of MLO Dev list
570  * @be_soc: be soc handle
571  *
572  * return: MLO dev list object
573  */
574 static inline dp_mlo_dev_obj_t
575 dp_get_mlo_dev_list_obj(struct dp_soc_be *be_soc)
576 {
577 	return be_soc->ml_ctxt;
578 }
579 
580 #if defined(WLAN_FEATURE_11BE_MLO)
581 /**
582  * dp_mlo_partner_chips_map() - Map MLO peers to partner SOCs
583  * @soc: Soc handle
584  * @peer: DP peer handle for ML peer
585  * @peer_id: peer_id
586  * Return: None
587  */
588 void dp_mlo_partner_chips_map(struct dp_soc *soc,
589 			      struct dp_peer *peer,
590 			      uint16_t peer_id);
591 
592 /**
593  * dp_mlo_partner_chips_unmap() - Unmap MLO peers from partner SOCs
594  * @soc: Soc handle
595  * @peer_id: peer_id
596  * Return: None
597  */
598 void dp_mlo_partner_chips_unmap(struct dp_soc *soc,
599 				uint16_t peer_id);
600 
601 /**
602  * dp_soc_initialize_cdp_cmn_mlo_ops() - Initialize common CDP API's
603  * @soc: Soc handle
604  *
605  * Return: None
606  */
607 void dp_soc_initialize_cdp_cmn_mlo_ops(struct dp_soc *soc);
608 
609 #ifdef WLAN_MLO_MULTI_CHIP
610 typedef void dp_ptnr_vdev_iter_func(struct dp_vdev_be *be_vdev,
611 				    struct dp_vdev *ptnr_vdev,
612 				    void *arg);
613 
614 /**
615  * dp_mlo_iter_ptnr_vdev() - API to iterate through ptnr vdev list
616  * @be_soc: dp_soc_be pointer
617  * @be_vdev: dp_vdev_be pointer
618  * @func: function to be called for each partner vdev
619  * @arg: argument need to be passed to func
620  * @mod_id: module id
621  * @type: iterate type
622  * @include_self_vdev: flag to include/exclude self vdev in iteration
623  *
624  * Return: None
625  */
626 void dp_mlo_iter_ptnr_vdev(struct dp_soc_be *be_soc,
627 			   struct dp_vdev_be *be_vdev,
628 			   dp_ptnr_vdev_iter_func func, void *arg,
629 			   enum dp_mod_id mod_id,
630 			   uint8_t type,
631 			   bool include_self_vdev);
632 #endif
633 
634 #ifdef WLAN_MCAST_MLO
635 /**
636  * dp_mlo_get_mcast_primary_vdev() - get ref to mcast primary vdev
637  * @be_soc: dp_soc_be pointer
638  * @be_vdev: dp_vdev_be pointer
639  * @mod_id: module id
640  *
641  * Return: mcast primary DP VDEV handle on success, NULL on failure
642  */
643 struct dp_vdev *dp_mlo_get_mcast_primary_vdev(struct dp_soc_be *be_soc,
644 					      struct dp_vdev_be *be_vdev,
645 					      enum dp_mod_id mod_id);
646 #endif
647 #endif
648 
649 #else
650 typedef struct dp_soc_be *dp_mld_peer_hash_obj_t;
651 typedef struct dp_soc_be *dp_mlo_dev_obj_t;
652 
653 static inline dp_mld_peer_hash_obj_t
654 dp_mlo_get_peer_hash_obj(struct dp_soc *soc)
655 {
656 	return dp_get_be_soc_from_dp_soc(soc);
657 }
658 
659 static inline dp_mlo_dev_obj_t
660 dp_get_mlo_dev_list_obj(struct dp_soc_be *be_soc)
661 {
662 	return be_soc;
663 }
664 #endif
665 
666 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
667 static inline
668 struct dp_hw_cookie_conversion_t *dp_get_tx_cookie_t(struct dp_soc *soc,
669 						     uint8_t pool_id)
670 {
671 	struct dp_global_context *dp_global = NULL;
672 
673 	dp_global = wlan_objmgr_get_global_ctx();
674 	return dp_global->tx_cc_ctx[pool_id];
675 }
676 
677 static inline
678 struct dp_hw_cookie_conversion_t *dp_get_spcl_tx_cookie_t(struct dp_soc *soc,
679 							  uint8_t pool_id)
680 {
681 	struct dp_global_context *dp_global = NULL;
682 
683 	dp_global = wlan_objmgr_get_global_ctx();
684 	return dp_global->spcl_tx_cc_ctx[pool_id];
685 }
686 #else
687 static inline
688 struct dp_hw_cookie_conversion_t *dp_get_tx_cookie_t(struct dp_soc *soc,
689 						     uint8_t pool_id)
690 {
691 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
692 
693 	return &be_soc->tx_cc_ctx[pool_id];
694 }
695 
696 static inline
697 struct dp_hw_cookie_conversion_t *dp_get_spcl_tx_cookie_t(struct dp_soc *soc,
698 							  uint8_t pool_id)
699 {
700 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
701 
702 	return &be_soc->tx_cc_ctx[pool_id];
703 }
704 #endif
705 
706 /**
707  * dp_mlo_peer_find_hash_attach_be() - API to initialize ML peer hash table
708  * @mld_hash_obj: Peer hash object
709  * @hash_elems: number of entries in hash table
710  *
711  * Return: QDF_STATUS_SUCCESS on successful attach, else QDF_STATUS_FAILURE
712  */
713 QDF_STATUS
714 dp_mlo_peer_find_hash_attach_be(dp_mld_peer_hash_obj_t mld_hash_obj,
715 				int hash_elems);
716 
717 /**
718  * dp_mlo_peer_find_hash_detach_be() - API to de-initialize ML peer hash table
719  *
720  * @mld_hash_obj: Peer hash object
721  *
722  * Return: void
723  */
724 void dp_mlo_peer_find_hash_detach_be(dp_mld_peer_hash_obj_t mld_hash_obj);
725 
726 /**
727  * dp_get_be_pdev_from_dp_pdev() - get dp_pdev_be from dp_pdev
728  * @pdev: dp_pdev pointer
729  *
730  * Return: dp_pdev_be pointer
731  */
732 static inline
733 struct dp_pdev_be *dp_get_be_pdev_from_dp_pdev(struct dp_pdev *pdev)
734 {
735 	return (struct dp_pdev_be *)pdev;
736 }
737 
738 /**
739  * dp_get_be_vdev_from_dp_vdev() - get dp_vdev_be from dp_vdev
740  * @vdev: dp_vdev pointer
741  *
742  * Return: dp_vdev_be pointer
743  */
744 static inline
745 struct dp_vdev_be *dp_get_be_vdev_from_dp_vdev(struct dp_vdev *vdev)
746 {
747 	return (struct dp_vdev_be *)vdev;
748 }
749 
750 /**
751  * dp_get_be_peer_from_dp_peer() - get dp_peer_be from dp_peer
752  * @peer: dp_peer pointer
753  *
754  * Return: dp_peer_be pointer
755  */
756 static inline
757 struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
758 {
759 	return (struct dp_peer_be *)peer;
760 }
761 
762 void dp_ppeds_disable_irq(struct dp_soc *soc, struct dp_srng *srng);
763 void dp_ppeds_enable_irq(struct dp_soc *soc, struct dp_srng *srng);
764 
765 QDF_STATUS dp_peer_setup_ppeds_be(struct dp_soc *soc, struct dp_peer *peer,
766 				  struct dp_vdev_be *be_vdev,
767 				  void *args);
768 
769 QDF_STATUS
770 dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
771 			       struct dp_hw_cookie_conversion_t *cc_ctx,
772 			       uint32_t num_descs,
773 			       enum qdf_dp_desc_type desc_type,
774 			       uint8_t desc_pool_id);
775 
776 void dp_reo_shared_qaddr_detach(struct dp_soc *soc);
777 
778 QDF_STATUS
779 dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
780 			       struct dp_hw_cookie_conversion_t *cc_ctx);
781 QDF_STATUS
782 dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
783 			     struct dp_hw_cookie_conversion_t *cc_ctx);
784 QDF_STATUS
785 dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
786 			       struct dp_hw_cookie_conversion_t *cc_ctx);
787 
788 /**
789  * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool
790  * @be_soc: beryllium soc handler
791  * @list_head: pointer to page desc head
792  * @list_tail: pointer to page desc tail
793  * @num_desc: number of TX/RX Descs required for SPT pages
794  *
795  * Return: number of SPT page Desc allocated
796  */
797 uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
798 				   struct dp_spt_page_desc **list_head,
799 				   struct dp_spt_page_desc **list_tail,
800 				   uint16_t num_desc);
801 
802 /**
803  * dp_cc_spt_page_desc_free() - free SPT DDR page descriptor to pool
804  * @be_soc: beryllium soc handler
805  * @list_head: pointer to page desc head
806  * @list_tail: pointer to page desc tail
807  * @page_nums: number of page desc freed back to pool
808  */
809 void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
810 			      struct dp_spt_page_desc **list_head,
811 			      struct dp_spt_page_desc **list_tail,
812 			      uint16_t page_nums);
813 
814 /**
815  * dp_cc_desc_id_generate() - generate SW cookie ID according to
816  *				DDR page 4K aligned or not
817  * @ppt_index: offset index in primary page table
818  * @spt_index: offset index in secondary DDR page
819  *
820  * Generate the SW cookie ID in the format HW expects
821  *
822  * Return: cookie ID
823  */
824 static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
825 					      uint16_t spt_index)
826 {
827 	/*
828 	 * For the 4K-aligned case the CMEM entry size is 4 bytes: the HW
829 	 * index in bit19~bit10 is ppt_index / 2 and the high-32-bits flag in
830 	 * bit9 is ppt_index % 2, so bit19~bit9 together carry exactly the
831 	 * original ppt_index value.
832 	 * For the 4K-unaligned case the CMEM entry size is 8 bytes and
833 	 * bit19~bit9 hold the HW index directly, same as ppt_index.
834 	 */
835 	return ((((uint32_t)ppt_index) << DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT) |
836 		spt_index);
837 }
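
/*
 * Worked example (illustrative only): for ppt_index = 3 and spt_index = 10,
 * dp_cc_desc_id_generate() returns (3 << 9) | 10 = 0x60A, and the masks
 * defined above recover both fields:
 *
 *	(0x60A & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
 *		DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT == 3
 *	(0x60A & DP_CC_DESC_ID_SPT_VA_OS_MASK) == 10
 */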
838 
839 /**
840  * dp_cc_desc_find() - find TX/RX Descs virtual address by ID
841  * @soc: be soc handle
842  * @desc_id: TX/RX Desc ID
843  *
844  * Return: TX/RX Desc virtual address
845  */
846 static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
847 					uint32_t desc_id)
848 {
849 	struct dp_soc_be *be_soc;
850 	uint16_t ppt_page_id, spt_va_id;
851 	uint8_t *spt_page_va;
852 
853 	be_soc = dp_get_be_soc_from_dp_soc(soc);
854 	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
855 			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;
856 
857 	spt_va_id = (desc_id & DP_CC_DESC_ID_SPT_VA_OS_MASK) >>
858 			DP_CC_DESC_ID_SPT_VA_OS_SHIFT;
859 
860 	/*
861 	 * The PPT index in CMEM follows the same order as the pages in the
862 	 * page desc array set up during initialization.
863 	 * Each entry in the DDR page is 64 bits; on a 32-bit system only
864 	 * the lower 32 bits of the VA value are used.
865 	 */
866 	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;
867 
868 	return (*((uintptr_t *)(spt_page_va  +
869 				spt_va_id * DP_CC_HW_READ_BYTES)));
870 }
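
/*
 * Usage sketch (hypothetical caller): a completion path could translate the
 * cookie carried in a WBM release descriptor back to the SW descriptor it
 * was generated for, e.g.
 *
 *	struct dp_tx_desc_s *tx_desc =
 *		(struct dp_tx_desc_s *)dp_cc_desc_find(soc, tx_cookie);
 *
 * where tx_cookie is an ID previously produced by dp_cc_desc_id_generate()
 * and published through DP_CC_SPT_PAGE_UPDATE_VA().
 */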
871 
872 /**
873  * dp_update_mlo_mld_vdev_ctxt_stats() - aggregate stats from mlo ctx
874  * @buf: vdev stats buf
875  * @mlo_ctxt_stats: mlo ctxt stats
876  *
877  * return: void
878  */
879 static inline
880 void dp_update_mlo_mld_vdev_ctxt_stats(void *buf,
881 				   struct dp_vdev_stats *mlo_ctxt_stats)
882 {
883 	struct dp_vdev_stats *tgt_vdev_stats = (struct dp_vdev_stats *)buf;
884 
885 	DP_UPDATE_TO_MLD_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats,
886 				    DP_XMIT_TOTAL);
887 }
888 
889 /**
890  * dp_update_mlo_link_vdev_ctxt_stats() - aggregate stats from mlo ctx
891  * @buf: vdev stats buf
892  * @mlo_ctxt_stats: mlo ctxt stats
893  * @xmit_type: xmit type of packet - MLD/Link
894  * return: void
895  */
896 static inline
897 void dp_update_mlo_link_vdev_ctxt_stats(void *buf,
898 					struct dp_vdev_stats *mlo_ctxt_stats,
899 					enum dp_pkt_xmit_type xmit_type)
900 {
901 	struct cdp_vdev_stats *tgt_vdev_stats = (struct cdp_vdev_stats *)buf;
902 
903 	DP_UPDATE_TO_LINK_VDEV_STATS(tgt_vdev_stats, mlo_ctxt_stats, xmit_type);
904 }
905 
906 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
907 /**
908  * enum dp_srng_near_full_levels - SRNG Near FULL levels
909  * @DP_SRNG_THRESH_SAFE: SRNG level safe for yielding the near full mode
910  *		of processing the entries in SRNG
911  * @DP_SRNG_THRESH_NEAR_FULL: SRNG level enters the near full mode
912  *		of processing the entries in SRNG
913  * @DP_SRNG_THRESH_CRITICAL: SRNG level enters the critical level of full
914  *		condition and drastic steps need to be taken for processing
915  *		the entries in SRNG
916  */
917 enum dp_srng_near_full_levels {
918 	DP_SRNG_THRESH_SAFE,
919 	DP_SRNG_THRESH_NEAR_FULL,
920 	DP_SRNG_THRESH_CRITICAL,
921 };
922 
923 /**
924  * dp_srng_check_ring_near_full() - Check if SRNG is marked as near-full from
925  *				its corresponding near-full irq handler
926  * @soc: Datapath SoC handle
927  * @dp_srng: datapath handle for this SRNG
928  *
929  * Return: 1, if the srng was marked as near-full
930  *	   0, if the srng was not marked as near-full
931  */
932 static inline int dp_srng_check_ring_near_full(struct dp_soc *soc,
933 					       struct dp_srng *dp_srng)
934 {
935 	return qdf_atomic_read(&dp_srng->near_full);
936 }
937 
938 /**
939  * dp_srng_get_near_full_level() - Check the num available entries in the
940  *			consumer srng and return the level of the srng
941  *			near full state.
942  * @soc: Datapath SoC Handle [To be validated by the caller]
943  * @dp_srng: SRNG handle
944  *
945  * Return: near-full level
946  */
947 static inline int
948 dp_srng_get_near_full_level(struct dp_soc *soc, struct dp_srng *dp_srng)
949 {
950 	uint32_t num_valid;
951 
952 	num_valid = hal_srng_dst_num_valid_nolock(soc->hal_soc,
953 						  dp_srng->hal_srng,
954 						  true);
955 
956 	if (num_valid > dp_srng->crit_thresh)
957 		return DP_SRNG_THRESH_CRITICAL;
958 	else if (num_valid < dp_srng->safe_thresh)
959 		return DP_SRNG_THRESH_SAFE;
960 	else
961 		return DP_SRNG_THRESH_NEAR_FULL;
962 }
963 
964 #define DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER	2
965 
966 /**
967  * _dp_srng_test_and_update_nf_params() - Test the near full level and update
968  *			the reap_limit and flags to reflect the state.
969  * @soc: Datapath soc handle
970  * @srng: Datapath handle for the srng
971  * @max_reap_limit: [Output Param] Buffer to set the max_reap_limit as
972  *			per the near-full state
973  *
974  * Return: 1, if the srng is near full
975  *	   0, if the srng is not near full
976  */
977 static inline int
978 _dp_srng_test_and_update_nf_params(struct dp_soc *soc,
979 				   struct dp_srng *srng,
980 				   int *max_reap_limit)
981 {
982 	int ring_near_full = 0, near_full_level;
983 
984 	if (dp_srng_check_ring_near_full(soc, srng)) {
985 		near_full_level = dp_srng_get_near_full_level(soc, srng);
986 		switch (near_full_level) {
987 		case DP_SRNG_THRESH_CRITICAL:
988 			/* Currently not doing anything special here */
989 			fallthrough;
990 		case DP_SRNG_THRESH_NEAR_FULL:
991 			ring_near_full = 1;
992 			*max_reap_limit *= DP_SRNG_PER_LOOP_NF_REAP_MULTIPLIER;
993 			break;
994 		case DP_SRNG_THRESH_SAFE:
995 			qdf_atomic_set(&srng->near_full, 0);
996 			ring_near_full = 0;
997 			break;
998 		default:
999 			qdf_assert(0);
1000 			break;
1001 		}
1002 	}
1003 
1004 	return ring_near_full;
1005 }
1006 #else
1007 static inline int
1008 _dp_srng_test_and_update_nf_params(struct dp_soc *soc,
1009 				   struct dp_srng *srng,
1010 				   int *max_reap_limit)
1011 {
1012 	return 0;
1013 }
1014 #endif
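
/*
 * Usage sketch (default_reap_limit and ring are hypothetical names): a srng
 * reap loop could query the near-full state once per invocation and scale
 * its per-loop quota before reaping:
 *
 *	int max_reap_limit = default_reap_limit;
 *	int near_full = _dp_srng_test_and_update_nf_params(soc, ring,
 *							   &max_reap_limit);
 *
 * When near_full is set, the caller reaps up to the boosted max_reap_limit;
 * once the ring level falls back to DP_SRNG_THRESH_SAFE the near_full flag
 * is cleared and normal processing resumes.
 */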
1015 
1016 #ifdef QCA_SUPPORT_DP_GLOBAL_CTX
1017 static inline
1018 uint32_t dp_desc_pool_get_spcl_cmem_base(uint8_t desc_pool_id)
1019 {
1020 	return (DP_TX_SPCL_DESC_CMEM_OFFSET +
1021 		(desc_pool_id * DP_TX_SPCL_DESC_POOL_CMEM_SIZE));
1022 }
1023 #else
1024 static inline
1025 uint32_t dp_desc_pool_get_spcl_cmem_base(uint8_t desc_pool_id)
1026 {
1027 	QDF_BUG(0);
1028 	return 0;
1029 }
1030 #endif
1031 static inline
1032 uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
1033 				    enum qdf_dp_desc_type desc_type)
1034 {
1035 	switch (desc_type) {
1036 	case QDF_DP_TX_DESC_TYPE:
1037 		return (DP_TX_DESC_CMEM_OFFSET +
1038 			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
1039 	case QDF_DP_TX_SPCL_DESC_TYPE:
1040 		return dp_desc_pool_get_spcl_cmem_base(desc_pool_id);
1041 	case QDF_DP_RX_DESC_BUF_TYPE:
1042 		return (DP_RX_DESC_CMEM_OFFSET +
1043 			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
1044 			DP_RX_DESC_POOL_CMEM_SIZE);
1045 	case QDF_DP_TX_PPEDS_DESC_TYPE:
1046 		return DP_TX_PPEDS_DESC_CMEM_OFFSET;
1047 	default:
1048 		QDF_BUG(0);
1049 	}
1050 	return 0;
1051 }
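
/*
 * Worked example (illustrative only): with the layout above, the CMEM base
 * returned for RX descriptor pool 1 of chip 0 is
 *
 *	DP_RX_DESC_CMEM_OFFSET +
 *		((0 * MAX_RXDESC_POOLS) + 1) * DP_RX_DESC_POOL_CMEM_SIZE
 *
 * i.e. RX pool regions are packed per chip and follow the TX (and, when
 * QCA_SUPPORT_DP_GLOBAL_CTX is defined, special TX) pool regions placed
 * earlier in the CMEM area.
 */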
1052 
1053 #ifndef WLAN_MLO_MULTI_CHIP
1054 static inline
1055 void dp_soc_mlo_fill_params(struct dp_soc *soc,
1056 			    struct cdp_soc_attach_params *params)
1057 {
1058 }
1059 
1060 static inline
1061 void dp_pdev_mlo_fill_params(struct dp_pdev *pdev,
1062 			     struct cdp_pdev_attach_params *params)
1063 {
1064 }
1065 
1066 static inline
1067 void dp_mlo_update_link_to_pdev_map(struct dp_soc *soc, struct dp_pdev *pdev)
1068 {
1069 }
1070 
1071 static inline
1072 void dp_mlo_update_link_to_pdev_unmap(struct dp_soc *soc, struct dp_pdev *pdev)
1073 {
1074 }
1075 
1076 static inline uint8_t dp_mlo_get_chip_id(struct dp_soc *soc)
1077 {
1078 	return 0;
1079 }
1080 #endif
1081 
1082 /**
1083  * dp_mlo_dev_ctxt_list_attach_wrapper() - Wrapper API for MLO dev list Init
1084  *
1085  * @mlo_dev_obj: MLO device object
1086  *
1087  * Return: void
1088  */
1089 void dp_mlo_dev_ctxt_list_attach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj);
1090 
1091 /**
1092  * dp_mlo_dev_ctxt_list_detach_wrapper() - Wrapper API for MLO dev list de-Init
1093  *
1094  * @mlo_dev_obj: MLO device object
1095  *
1096  * Return: void
1097  */
1098 void dp_mlo_dev_ctxt_list_detach_wrapper(dp_mlo_dev_obj_t mlo_dev_obj);
1099 
1100 /**
1101  * dp_mlo_dev_ctxt_list_attach() - API to initialize MLO device List
1102  *
1103  * @mlo_dev_obj: MLO device object
1104  *
1105  * Return: void
1106  */
1107 void dp_mlo_dev_ctxt_list_attach(dp_mlo_dev_obj_t mlo_dev_obj);
1108 
1109 /**
1110  * dp_mlo_dev_ctxt_list_detach() - API to de-initialize MLO device List
1111  *
1112  * @mlo_dev_obj: MLO device object
1113  *
1114  * Return: void
1115  */
1116 void dp_mlo_dev_ctxt_list_detach(dp_mlo_dev_obj_t mlo_dev_obj);
1117 
1118 /**
1119  * dp_soc_initialize_cdp_cmn_mlo_ops() - API to initialize common CDP MLO ops
1120  *
1121  * @soc: Datapath soc handle
1122  *
1123  * Return: void
1124  */
1125 void dp_soc_initialize_cdp_cmn_mlo_ops(struct dp_soc *soc);
1126 
1127 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_DP_MLO_DEV_CTX)
1128 /**
1129  * dp_mlo_dev_ctxt_unref_delete() - Releasing the ref for MLO device ctxt
1130  *
1131  * @mlo_dev_ctxt: MLO device context handle
1132  * @mod_id: module id which is releasing the reference
1133  *
1134  * Return: void
1135  */
1136 void dp_mlo_dev_ctxt_unref_delete(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
1137 				  enum dp_mod_id mod_id);
1138 
1139 /**
1140  * dp_mlo_dev_get_ref() - Get the ref for MLO device ctxt
1141  *
1142  * @mlo_dev_ctxt: MLO device context handle
1143  * @mod_id: module id which is requesting the reference
1144  *
1145  * Return: SUCCESS on acquiring the ref.
1146  */
1147 QDF_STATUS
1148 dp_mlo_dev_get_ref(struct dp_mlo_dev_ctxt *mlo_dev_ctxt,
1149 		   enum dp_mod_id mod_id);
1150 
1151 /**
1152  * dp_get_mlo_dev_ctx_by_mld_mac_addr() - Get MLO device ctx based on MLD MAC
1153  *
1154  * @be_soc: be soc handle
1155  * @mldaddr: MLD MAC address
1156  * @mod_id: module id which is requesting the reference
1157  *
1158  * Return: MLO device context Handle on success, NULL on failure
1159  */
1160 struct dp_mlo_dev_ctxt *
1161 dp_get_mlo_dev_ctx_by_mld_mac_addr(struct dp_soc_be *be_soc,
1162 				   uint8_t *mldaddr, enum dp_mod_id mod_id);
1163 #endif /* WLAN_DP_MLO_DEV_CTX */
1164 #endif
1165