1 /*
2 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #ifndef _DP_TYPES_H_
21 #define _DP_TYPES_H_
22
23 #include <qdf_types.h>
24 #include <qdf_nbuf.h>
25 #include <qdf_lock.h>
26 #include <qdf_atomic.h>
27 #include <qdf_util.h>
28 #include <qdf_list.h>
29 #include <qdf_lro.h>
30 #include <queue.h>
31 #include <htt_common.h>
32 #include <htt.h>
33 #include <htt_stats.h>
34 #include <cdp_txrx_cmn.h>
35 #ifdef DP_MOB_DEFS
36 #include <cds_ieee80211_common.h>
37 #endif
38 #include <wdi_event_api.h> /* WDI subscriber event list */
39
40 #include "hal_hw_headers.h"
41 #include <hal_tx.h>
42 #include <hal_reo.h>
43 #include "wlan_cfg.h"
44 #include "hal_rx.h"
45 #include <hal_api.h>
46 #include <hal_api_mon.h>
48
49 #define dp_init_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_INIT, params)
50 #define dp_init_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_INIT, params)
51 #define dp_init_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_INIT, params)
52 #define dp_init_info(params...) \
53 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_INIT, ## params)
54 #define dp_init_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_INIT, params)
55
56 #define dp_vdev_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_VDEV, params)
57 #define dp_vdev_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_VDEV, params)
58 #define dp_vdev_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_VDEV, params)
59 #define dp_vdev_info(params...) \
60 __QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_VDEV, ## params)
61 #define dp_vdev_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_VDEV, params)
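
/*
 * Illustrative usage of the logging macros above (a minimal sketch; the
 * format strings and variables shown are hypothetical):
 *
 *	dp_init_err("%pK: srng setup failed", soc);
 *	dp_vdev_debug("vdev_id %u attached", vdev_id);
 */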
62
63 #define MAX_BW 8
64 #define MAX_RETRIES 4
65 #define MAX_RECEPTION_TYPES 4
66
67 #define MINIDUMP_STR_SIZE 25
68 #include <dp_umac_reset.h>
69
70 #define REPT_MU_MIMO 1
71 #define REPT_MU_OFDMA_MIMO 3
72 #define DP_VO_TID 6
73 /** MAX TID MAPS AVAILABLE PER PDEV */
74 #define DP_MAX_TID_MAPS 16
75 /** pad DSCP_TID_MAP_MAX with 6 to fix an out-of-bounds (OOB) access issue */
76 #define DSCP_TID_MAP_MAX (64 + 6)
77 #define DP_IP_DSCP_SHIFT 2
78 #define DP_IP_DSCP_MASK 0x3f
79 #define DP_FC0_SUBTYPE_QOS 0x80
80 #define DP_QOS_TID 0x0f
81 #define DP_IPV6_PRIORITY_SHIFT 20
82 #define MAX_MON_LINK_DESC_BANKS 2
83 #define DP_VDEV_ALL CDP_VDEV_ALL
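
/*
 * Illustrative TID derivation using the classification macros above (a
 * minimal sketch; 'tos' is an IPv4 TOS byte and 'qos_ctl' is the first
 * byte of an 802.11 QoS control field, both hypothetical local variables):
 *
 *	uint8_t dscp = (tos >> DP_IP_DSCP_SHIFT) & DP_IP_DSCP_MASK;
 *	uint8_t tid = qos_ctl & DP_QOS_TID;
 */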
84
85 #if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
86 #define WLAN_DP_RESET_MON_BUF_RING_FILTER
87 #define MAX_TXDESC_POOLS 6
88 #else
89 #define MAX_TXDESC_POOLS 4
90 #endif
91
92 /* Max no of descriptors to handle special frames like EAPOL */
93 #define MAX_TX_SPL_DESC 1024
94
95 #define MAX_RXDESC_POOLS 4
96 #define MAX_PPE_TXDESC_POOLS 1
97
98 /* Max no. of VDEVs per PSOC */
99 #ifdef WLAN_PSOC_MAX_VDEVS
100 #define MAX_VDEV_CNT WLAN_PSOC_MAX_VDEVS
101 #else
102 #define MAX_VDEV_CNT 51
103 #endif
104
105 /* Max no. of VDEVs a PDEV can support */
106 #ifdef WLAN_PDEV_MAX_VDEVS
107 #define DP_PDEV_MAX_VDEVS WLAN_PDEV_MAX_VDEVS
108 #else
109 #define DP_PDEV_MAX_VDEVS 17
110 #endif
111
112 #define EXCEPTION_DEST_RING_ID 0
113 #define MAX_IDLE_SCATTER_BUFS 16
114 #define DP_MAX_IRQ_PER_CONTEXT 12
115 #define DEFAULT_HW_PEER_ID 0xffff
116
117 #define MAX_AST_AGEOUT_COUNT 128
118
119 #ifdef TX_ADDR_INDEX_SEARCH
120 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_INDEX_SEARCH
121 #else
122 #define DP_TX_ADDR_SEARCH_ADDR_POLICY HAL_TX_ADDR_SEARCH_DEFAULT
123 #endif
124
125 #define WBM_INT_ERROR_ALL 0
126 #define WBM_INT_ERROR_REO_NULL_BUFFER 1
127 #define WBM_INT_ERROR_REO_NULL_LINK_DESC 2
128 #define WBM_INT_ERROR_REO_NULL_MSDU_BUFF 3
129 #define WBM_INT_ERROR_REO_BUFF_REAPED 4
130 #define MAX_WBM_INT_ERROR_REASONS 5
131
132 #define MAX_TX_HW_QUEUES MAX_TCL_DATA_RINGS
133 /* Maximum retries for Delba per tid per peer */
134 #define DP_MAX_DELBA_RETRY 3
135
136 #ifdef AST_OFFLOAD_ENABLE
137 #define AST_OFFLOAD_ENABLE_STATUS 1
138 #else
139 #define AST_OFFLOAD_ENABLE_STATUS 0
140 #endif
141
142 #ifdef FEATURE_MEC_OFFLOAD
143 #define FW_MEC_FW_OFFLOAD_ENABLED 1
144 #else
145 #define FW_MEC_FW_OFFLOAD_ENABLED 0
146 #endif
147
148 #define PCP_TID_MAP_MAX 8
149 #define MAX_MU_USERS 37
150
151 #define REO_CMD_EVENT_HIST_MAX 64
152
153 #define DP_MAX_SRNGS 64
154
155 /* 2G PHYB */
156 #define PHYB_2G_LMAC_ID 2
157 #define PHYB_2G_TARGET_PDEV_ID 2
158
159 /* Flags for skipping s/w tid classification */
160 #define DP_TX_HW_DSCP_TID_MAP_VALID 0x1
161 #define DP_TXRX_HLOS_TID_OVERRIDE_ENABLED 0x2
162 #define DP_TX_MESH_ENABLED 0x4
163 #define DP_TX_INVALID_QOS_TAG 0xf
164
165 #ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
166 #define DP_RX_REFILL_BUFF_POOL_BURST 64
167 #endif
168
169 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
170 #define DP_RX_FSE_FLOW_MATCH_SFE 0xAAAA
171 #endif
172
173 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
174 #define DP_SKIP_BAR_UPDATE_TIMEOUT 5000
175 #endif
176
177 #define DP_TX_MAGIC_PATTERN_INUSE 0xABCD1234
178 #define DP_TX_MAGIC_PATTERN_FREE 0xDEADBEEF
179
180 #define DP_INTR_POLL_TIMER_MS 5
181
182 #ifdef IPA_OFFLOAD
183 #define DP_PEER_REO_STATS_TID_SHIFT 16
184 #define DP_PEER_REO_STATS_TID_MASK 0xFFFF0000
185 #define DP_PEER_REO_STATS_PEER_ID_MASK 0x0000FFFF
186 #define DP_PEER_GET_REO_STATS_TID(comb_peer_id_tid) \
187 ((comb_peer_id_tid & DP_PEER_REO_STATS_TID_MASK) >> \
188 DP_PEER_REO_STATS_TID_SHIFT)
189 #define DP_PEER_GET_REO_STATS_PEER_ID(comb_peer_id_tid) \
190 (comb_peer_id_tid & DP_PEER_REO_STATS_PEER_ID_MASK)
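
/*
 * Illustrative unpacking of a combined peer_id/tid word with the macros
 * above (a minimal sketch; the packed value is hypothetical):
 *
 *	uint32_t comb = (6 << DP_PEER_REO_STATS_TID_SHIFT) | 0x0025;
 *	uint16_t tid = DP_PEER_GET_REO_STATS_TID(comb);         // 6
 *	uint16_t peer_id = DP_PEER_GET_REO_STATS_PEER_ID(comb); // 0x25
 */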
191 #endif
192
193 typedef void dp_ptnr_soc_iter_func(struct dp_soc *ptnr_soc, void *arg,
194 int chip_id);
195
196 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
197 #define DP_MLD_MODE_UNIFIED_NONBOND 0
198 #define DP_MLD_MODE_UNIFIED_BOND 1
199 #define DP_MLD_MODE_HYBRID_NONBOND 2
200 #define DP_MLD_MODE_MAX DP_MLD_MODE_HYBRID_NONBOND
201
202 #define DP_LINK_VDEV_ITER 1
203 #define DP_BRIDGE_VDEV_ITER 2
204 #define DP_ALL_VDEV_ITER 3
205 #define IS_LINK_VDEV_ITER_REQUIRED(type) (type & DP_LINK_VDEV_ITER)
206 #define IS_BRIDGE_VDEV_ITER_REQUIRED(type) (type & DP_BRIDGE_VDEV_ITER)
207 #define DP_VDEV_ITERATE_ALL 1
208 #define DP_VDEV_ITERATE_SKIP_SELF 0
209 #endif
210
211 /**
212 * enum dp_pkt_xmit_type - Type of ingress the stats refer to
213 *
214 * @DP_XMIT_LINK: Packet ingress-ed on Link
215 * @DP_XMIT_MLD: Packet ingress-ed on MLD
216 * @DP_XMIT_TOTAL: Packets ingress-ed on MLD and LINK
217 */
218 enum dp_pkt_xmit_type {
219 DP_XMIT_LINK,
220 DP_XMIT_MLD,
221 DP_XMIT_TOTAL,
222 };
223
224 enum rx_pktlog_mode {
225 DP_RX_PKTLOG_DISABLED = 0,
226 DP_RX_PKTLOG_FULL,
227 DP_RX_PKTLOG_LITE,
228 };
229
230 /* enum m_copy_mode - Available mcopy modes */
233 enum m_copy_mode {
234 M_COPY_DISABLED = 0,
235 M_COPY = 2,
236 M_COPY_EXTENDED = 4,
237 };
238
239 struct msdu_list {
240 qdf_nbuf_t head;
241 qdf_nbuf_t tail;
242 uint32_t sum_len;
243 };
244
245 struct dp_soc_cmn;
246 struct dp_pdev;
247 struct dp_vdev;
248 struct dp_tx_desc_s;
249 struct dp_soc;
250 union dp_rx_desc_list_elem_t;
251 struct cdp_peer_rate_stats_ctx;
252 struct cdp_soc_rate_stats_ctx;
253 struct dp_rx_fst;
254 struct dp_mon_filter;
255 struct dp_mon_mpdu;
256 #ifdef BE_PKTLOG_SUPPORT
257 struct dp_mon_filter_be;
258 #endif
259 struct dp_peer;
260 struct dp_txrx_peer;
261
262 /**
263 * enum dp_peer_state - DP peer states
264 * @DP_PEER_STATE_NONE:
265 * @DP_PEER_STATE_INIT:
266 * @DP_PEER_STATE_ACTIVE:
267 * @DP_PEER_STATE_LOGICAL_DELETE:
268 * @DP_PEER_STATE_INACTIVE:
269 * @DP_PEER_STATE_FREED:
270 * @DP_PEER_STATE_INVALID:
271 */
272 enum dp_peer_state {
273 DP_PEER_STATE_NONE,
274 DP_PEER_STATE_INIT,
275 DP_PEER_STATE_ACTIVE,
276 DP_PEER_STATE_LOGICAL_DELETE,
277 DP_PEER_STATE_INACTIVE,
278 DP_PEER_STATE_FREED,
279 DP_PEER_STATE_INVALID,
280 };
281
282 /**
283 * enum dp_mod_id - DP module IDs
284 * @DP_MOD_ID_TX_RX:
285 * @DP_MOD_ID_TX_COMP:
286 * @DP_MOD_ID_RX:
287 * @DP_MOD_ID_HTT_COMP:
288 * @DP_MOD_ID_RX_ERR:
289 * @DP_MOD_ID_TX_PPDU_STATS:
290 * @DP_MOD_ID_RX_PPDU_STATS:
291 * @DP_MOD_ID_CDP:
292 * @DP_MOD_ID_GENERIC_STATS:
293 * @DP_MOD_ID_TX_MULTIPASS:
294 * @DP_MOD_ID_TX_CAPTURE:
295 * @DP_MOD_ID_NSS_OFFLOAD:
296 * @DP_MOD_ID_CONFIG:
297 * @DP_MOD_ID_HTT:
298 * @DP_MOD_ID_IPA:
299 * @DP_MOD_ID_AST:
300 * @DP_MOD_ID_MCAST2UCAST:
301 * @DP_MOD_ID_CHILD:
302 * @DP_MOD_ID_MESH:
303 * @DP_MOD_ID_TX_EXCEPTION:
304 * @DP_MOD_ID_TDLS:
305 * @DP_MOD_ID_MISC:
306 * @DP_MOD_ID_MSCS:
307 * @DP_MOD_ID_TX:
308 * @DP_MOD_ID_SAWF:
309 * @DP_MOD_ID_REINJECT:
310 * @DP_MOD_ID_SCS:
311 * @DP_MOD_ID_UMAC_RESET:
312 * @DP_MOD_ID_TX_MCAST:
313 * @DP_MOD_ID_DS:
314 * @DP_MOD_ID_MLO_DEV:
315 * @DP_MOD_ID_MAX:
316 */
317 enum dp_mod_id {
318 DP_MOD_ID_TX_RX,
319 DP_MOD_ID_TX_COMP,
320 DP_MOD_ID_RX,
321 DP_MOD_ID_HTT_COMP,
322 DP_MOD_ID_RX_ERR,
323 DP_MOD_ID_TX_PPDU_STATS,
324 DP_MOD_ID_RX_PPDU_STATS,
325 DP_MOD_ID_CDP,
326 DP_MOD_ID_GENERIC_STATS,
327 DP_MOD_ID_TX_MULTIPASS,
328 DP_MOD_ID_TX_CAPTURE,
329 DP_MOD_ID_NSS_OFFLOAD,
330 DP_MOD_ID_CONFIG,
331 DP_MOD_ID_HTT,
332 DP_MOD_ID_IPA,
333 DP_MOD_ID_AST,
334 DP_MOD_ID_MCAST2UCAST,
335 DP_MOD_ID_CHILD,
336 DP_MOD_ID_MESH,
337 DP_MOD_ID_TX_EXCEPTION,
338 DP_MOD_ID_TDLS,
339 DP_MOD_ID_MISC,
340 DP_MOD_ID_MSCS,
341 DP_MOD_ID_TX,
342 DP_MOD_ID_SAWF,
343 DP_MOD_ID_REINJECT,
344 DP_MOD_ID_SCS,
345 DP_MOD_ID_UMAC_RESET,
346 DP_MOD_ID_TX_MCAST,
347 DP_MOD_ID_DS,
348 DP_MOD_ID_MLO_DEV,
349 DP_MOD_ID_MAX,
350 };
351
352 /**
353 * enum dp_peer_type - DP peer type
354 * @DP_PEER_TYPE_LEGACY:
355 * @DP_PEER_TYPE_MLO_LINK:
356 * @DP_PEER_TYPE_MLO:
357 */
358 enum dp_peer_type {
359 DP_PEER_TYPE_LEGACY,
360 DP_PEER_TYPE_MLO_LINK,
361 DP_PEER_TYPE_MLO,
362 };
363
364 #define DP_PDEV_ITERATE_VDEV_LIST(_pdev, _vdev) \
365 TAILQ_FOREACH((_vdev), &(_pdev)->vdev_list, vdev_list_elem)
366
367 #define DP_VDEV_ITERATE_PEER_LIST(_vdev, _peer) \
368 TAILQ_FOREACH((_peer), &(_vdev)->peer_list, peer_list_elem)
369
370 #define DP_PEER_ITERATE_ASE_LIST(_peer, _ase, _temp_ase) \
371 TAILQ_FOREACH_SAFE((_ase), &_peer->ast_entry_list, ase_list_elem, (_temp_ase))
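
/*
 * Illustrative traversal using the iteration helpers above (a minimal
 * sketch; the appropriate list locks must be held by the caller and the
 * counter is hypothetical):
 *
 *	struct dp_vdev *vdev;
 *	struct dp_peer *peer;
 *	uint32_t num_peers = 0;
 *
 *	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
 *		DP_VDEV_ITERATE_PEER_LIST(vdev, peer)
 *			num_peers++;
 *	}
 */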
372
373 #define DP_MUTEX_TYPE qdf_spinlock_t
374
375 #define DP_FRAME_IS_MULTICAST(_a) (*(_a) & 0x01)
376 #define DP_FRAME_IS_IPV4_MULTICAST(_a) (*(_a) == 0x01)
377
378 #define DP_FRAME_IS_IPV6_MULTICAST(_a) \
379 ((_a)[0] == 0x33 && \
380 (_a)[1] == 0x33)
381
382 #define DP_FRAME_IS_BROADCAST(_a) \
383 ((_a)[0] == 0xff && \
384 (_a)[1] == 0xff && \
385 (_a)[2] == 0xff && \
386 (_a)[3] == 0xff && \
387 (_a)[4] == 0xff && \
388 (_a)[5] == 0xff)
389 #define DP_FRAME_IS_SNAP(_llc) ((_llc)->llc_dsap == 0xaa && \
390 (_llc)->llc_ssap == 0xaa && \
391 (_llc)->llc_un.type_snap.control == 0x3)
392 #define DP_FRAME_IS_LLC(typeorlen) ((typeorlen) >= 0x600)
393 #define DP_FRAME_FC0_TYPE_MASK 0x0c
394 #define DP_FRAME_FC0_TYPE_DATA 0x08
395 #define DP_FRAME_IS_DATA(_frame) \
396 (((_frame)->i_fc[0] & DP_FRAME_FC0_TYPE_MASK) == DP_FRAME_FC0_TYPE_DATA)
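
/*
 * Illustrative destination-address and frame-type checks using the macros
 * above (a minimal sketch; 'dstmac' is a hypothetical 6-byte MAC address
 * and 'wh' a hypothetical 802.11 header with an i_fc[] field):
 *
 *	if (DP_FRAME_IS_BROADCAST(dstmac))
 *		... // broadcast handling
 *	else if (DP_FRAME_IS_IPV6_MULTICAST(dstmac))
 *		... // IPv6 multicast handling
 *
 *	if (DP_FRAME_IS_DATA(wh))
 *		... // data frame handling
 */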
397
398 /*
399 * macros to convert hw mac id to sw mac id:
400 * mac ids used by hardware start from a value of 1 while
401 * those in host software start from a value of 0. Use the
402 * macros below to convert between mac ids used by software and
403 * hardware
404 */
405 #define DP_SW2HW_MACID(id) ((id) + 1)
406 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
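
/*
 * Illustrative mac id conversion using the macros above (a minimal sketch):
 *
 *	int hw_mac_id = DP_SW2HW_MACID(0);	   // software id 0 -> hardware id 1
 *	int sw_mac_id = DP_HW2SW_MACID(hw_mac_id); // hardware id 1 -> software id 0
 */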
407
408 /*
409 * Number of Tx Queues:
410 * enum and macros to define how many threshold levels are used
411 * for the AC-based flow control
412 */
413 #ifdef QCA_AC_BASED_FLOW_CONTROL
414 enum dp_fl_ctrl_threshold {
415 DP_TH_BE_BK = 0,
416 DP_TH_VI,
417 DP_TH_VO,
418 DP_TH_HI,
419 };
420
421 #define FL_TH_MAX (4)
422 #define FL_TH_VI_PERCENTAGE (80)
423 #define FL_TH_VO_PERCENTAGE (60)
424 #define FL_TH_HI_PERCENTAGE (40)
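
/*
 * Illustrative derivation of per-AC stop thresholds from a base threshold
 * using the percentages above (a minimal sketch; 'base_stop_th' and the
 * 'stop_th[]' array are hypothetical):
 *
 *	stop_th[DP_TH_BE_BK] = base_stop_th;
 *	stop_th[DP_TH_VI] = (base_stop_th * FL_TH_VI_PERCENTAGE) / 100;
 *	stop_th[DP_TH_VO] = (base_stop_th * FL_TH_VO_PERCENTAGE) / 100;
 *	stop_th[DP_TH_HI] = (base_stop_th * FL_TH_HI_PERCENTAGE) / 100;
 */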
425 #endif
426
427 /**
428 * enum dp_intr_mode
429 * @DP_INTR_INTEGRATED: Line interrupts
430 * @DP_INTR_MSI: MSI interrupts
431 * @DP_INTR_POLL: Polling
432 * @DP_INTR_LEGACY_VIRTUAL_IRQ:
433 */
434 enum dp_intr_mode {
435 DP_INTR_INTEGRATED = 0,
436 DP_INTR_MSI,
437 DP_INTR_POLL,
438 DP_INTR_LEGACY_VIRTUAL_IRQ,
439 };
440
441 /**
442 * enum dp_tx_frm_type
443 * @dp_tx_frm_std: Regular frame, no added header fragments
444 * @dp_tx_frm_tso: TSO segment, with a modified IP header added
445 * @dp_tx_frm_sg: SG segment
446 * @dp_tx_frm_audio: Audio frames, a custom LLC/SNAP header added
447 * @dp_tx_frm_me: Multicast to Unicast Converted frame
448 * @dp_tx_frm_raw: Raw Frame
449 * @dp_tx_frm_rmnet:
450 */
451 enum dp_tx_frm_type {
452 dp_tx_frm_std = 0,
453 dp_tx_frm_tso,
454 dp_tx_frm_sg,
455 dp_tx_frm_audio,
456 dp_tx_frm_me,
457 dp_tx_frm_raw,
458 dp_tx_frm_rmnet,
459 };
460
461 /**
462 * enum dp_ast_type
463 * @dp_ast_type_wds: WDS peer AST type
464 * @dp_ast_type_static: static ast entry type
465 * @dp_ast_type_mec: Multicast echo ast entry type
466 */
467 enum dp_ast_type {
468 dp_ast_type_wds = 0,
469 dp_ast_type_static,
470 dp_ast_type_mec,
471 };
472
473 /**
474 * enum dp_nss_cfg
475 * @dp_nss_cfg_default: No radios are offloaded
476 * @dp_nss_cfg_first_radio: First radio offloaded
477 * @dp_nss_cfg_second_radio: Second radio offloaded
478 * @dp_nss_cfg_dbdc: Dual radios offloaded
479 * @dp_nss_cfg_dbtc: Three radios offloaded
480 * @dp_nss_cfg_max: max value
481 */
482 enum dp_nss_cfg {
483 dp_nss_cfg_default = 0x0,
484 dp_nss_cfg_first_radio = 0x1,
485 dp_nss_cfg_second_radio = 0x2,
486 dp_nss_cfg_dbdc = 0x3,
487 dp_nss_cfg_dbtc = 0x7,
488 dp_nss_cfg_max
489 };
490
491 #ifdef WLAN_TX_PKT_CAPTURE_ENH
492 #define DP_CPU_RING_MAP_1 1
493 #endif
494
495 /**
496 * enum dp_cpu_ring_map_types - dp tx cpu ring map
497 * @DP_NSS_DEFAULT_MAP: Default mode with no NSS offloaded
498 * @DP_NSS_FIRST_RADIO_OFFLOADED_MAP: Only First Radio is offloaded
499 * @DP_NSS_SECOND_RADIO_OFFLOADED_MAP: Only second radio is offloaded
500 * @DP_NSS_DBDC_OFFLOADED_MAP: Both radios are offloaded
501 * @DP_NSS_DBTC_OFFLOADED_MAP: All three radios are offloaded
502 * @DP_SINGLE_TX_RING_MAP: all CPUs mapped to a single ring to avoid out-of-order delivery
503 * @DP_NSS_CPU_RING_MAP_MAX: Max cpu ring map val
504 */
505 enum dp_cpu_ring_map_types {
506 DP_NSS_DEFAULT_MAP,
507 DP_NSS_FIRST_RADIO_OFFLOADED_MAP,
508 DP_NSS_SECOND_RADIO_OFFLOADED_MAP,
509 DP_NSS_DBDC_OFFLOADED_MAP,
510 DP_NSS_DBTC_OFFLOADED_MAP,
511 #ifdef WLAN_TX_PKT_CAPTURE_ENH
512 DP_SINGLE_TX_RING_MAP,
513 #endif
514 DP_NSS_CPU_RING_MAP_MAX
515 };
516
517 /**
518 * struct dp_rx_nbuf_frag_info - Hold vaddr and paddr for a buffer
519 *
520 * @paddr: Physical address of buffer allocated.
521 * @virt_addr: union of virtual address representations
522 * @nbuf: Allocated nbuf in case of nbuf approach.
523 * @vaddr: Virtual address of frag allocated in case of frag approach.
524 */
525 struct dp_rx_nbuf_frag_info {
526 qdf_dma_addr_t paddr;
527 union {
528 qdf_nbuf_t nbuf;
529 qdf_frag_t vaddr;
530 } virt_addr;
531 };
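
/*
 * Illustrative population of dp_rx_nbuf_frag_info for the nbuf approach
 * (a minimal sketch; allocation, DMA mapping and error handling are
 * omitted and the mapping call shown is only indicative):
 *
 *	struct dp_rx_nbuf_frag_info info = {0};
 *
 *	info.virt_addr.nbuf = nbuf;
 *	info.paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);
 */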
532
533 /**
534 * enum dp_ctxt_type - context type
535 * @DP_PDEV_TYPE: PDEV context
536 * @DP_RX_RING_HIST_TYPE: Datapath rx ring history
537 * @DP_RX_ERR_RING_HIST_TYPE: Datapath rx error ring history
538 * @DP_RX_REINJECT_RING_HIST_TYPE: Datapath reinject ring history
539 * @DP_TX_TCL_HIST_TYPE:
540 * @DP_TX_COMP_HIST_TYPE:
541 * @DP_FISA_RX_FT_TYPE:
542 * @DP_RX_REFILL_RING_HIST_TYPE: Datapath rx refill ring history
543 * @DP_TX_HW_DESC_HIST_TYPE: Datapath TX HW descriptor history
544 * @DP_MON_SOC_TYPE: Datapath monitor soc context
545 * @DP_MON_PDEV_TYPE: Datapath monitor pdev context
546 * @DP_MON_STATUS_BUF_HIST_TYPE: DP monitor status buffer history
547 * @DP_CFG_EVENT_HIST_TYPE: DP config events history
548 * @DP_MON_TX_DESC_POOL_TYPE: DP TX desc pool buffer
549 * @DP_MON_RX_DESC_POOL_TYPE: DP RX desc pool buffer
550 */
551 enum dp_ctxt_type {
552 DP_PDEV_TYPE,
553 DP_RX_RING_HIST_TYPE,
554 DP_RX_ERR_RING_HIST_TYPE,
555 DP_RX_REINJECT_RING_HIST_TYPE,
556 DP_TX_TCL_HIST_TYPE,
557 DP_TX_COMP_HIST_TYPE,
558 DP_FISA_RX_FT_TYPE,
559 DP_RX_REFILL_RING_HIST_TYPE,
560 DP_TX_HW_DESC_HIST_TYPE,
561 DP_MON_SOC_TYPE,
562 DP_MON_PDEV_TYPE,
563 DP_MON_STATUS_BUF_HIST_TYPE,
564 DP_CFG_EVENT_HIST_TYPE,
565 DP_MON_TX_DESC_POOL_TYPE,
566 DP_MON_RX_DESC_POOL_TYPE,
567 };
568
569 /**
570 * struct rx_desc_pool - RX descriptor pool
571 * @pool_size: number of RX descriptors in the pool
572 * @elem_size: Element size
573 * @desc_pages: Multi page descriptors
574 * @array: pointer to array of RX descriptors
575 * @freelist: pointer to free RX descriptor linked list
576 * @lock: Protection for the RX descriptor pool
577 * @owner: owner for nbuf
578 * @buf_size: Buffer size
579 * @buf_alignment: Buffer alignment
580 * @rx_mon_dest_frag_enable: Enable frag processing for mon dest buffer
581 * @pf_cache: page frag cache
582 * @desc_type: type of desc this pool serves
583 */
584 struct rx_desc_pool {
585 uint32_t pool_size;
586 #ifdef RX_DESC_MULTI_PAGE_ALLOC
587 uint16_t elem_size;
588 struct qdf_mem_multi_page_t desc_pages;
589 #else
590 union dp_rx_desc_list_elem_t *array;
591 #endif
592 union dp_rx_desc_list_elem_t *freelist;
593 qdf_spinlock_t lock;
594 uint8_t owner;
595 uint16_t buf_size;
596 uint8_t buf_alignment;
597 bool rx_mon_dest_frag_enable;
598 qdf_frag_cache_t pf_cache;
599 enum qdf_dp_desc_type desc_type;
600 };
601
602 /**
603 * struct dp_tx_ext_desc_elem_s
604 * @next: next extension descriptor pointer
605 * @vaddr: hlos virtual address pointer
606 * @paddr: physical address pointer for descriptor
607 * @flags: mark features for extension descriptor
608 * @me_buffer: Pointer to ME buffer - store this so that it can be freed on
609 * Tx completion of ME packet
610 * @tso_desc: Pointer to Tso desc
611 * @tso_num_desc: Pointer to tso_num_desc
612 */
613 struct dp_tx_ext_desc_elem_s {
614 struct dp_tx_ext_desc_elem_s *next;
615 void *vaddr;
616 qdf_dma_addr_t paddr;
617 uint16_t flags;
618 struct dp_tx_me_buf_t *me_buffer;
619 struct qdf_tso_seg_elem_t *tso_desc;
620 struct qdf_tso_num_seg_elem_t *tso_num_desc;
621 };
622
623 /*
624 * NB: intentionally not using kernel-doc comment because the kernel-doc
625 * script does not handle the qdf_dma_mem_context macro
626 * struct dp_tx_ext_desc_pool_s - Tx Extension Descriptor Pool
627 * @elem_count: Number of descriptors in the pool
628 * @elem_size: Size of each descriptor
629 * @num_free: Number of free descriptors
630 * @desc_pages: multiple page allocation information for actual descriptors
631 * @link_elem_size: size of the link descriptor in cacheable memory used for
632 * chaining the extension descriptors
633 * @desc_link_pages: multiple page allocation information for link descriptors
634 * @freelist:
635 * @lock:
636 * @memctx:
637 */
638 struct dp_tx_ext_desc_pool_s {
639 uint16_t elem_count;
640 int elem_size;
641 uint16_t num_free;
642 struct qdf_mem_multi_page_t desc_pages;
643 int link_elem_size;
644 struct qdf_mem_multi_page_t desc_link_pages;
645 struct dp_tx_ext_desc_elem_s *freelist;
646 qdf_spinlock_t lock;
647 qdf_dma_mem_context(memctx);
648 };
649
650 /**
651 * struct dp_tx_desc_s - Tx Descriptor
652 * @next: Next in the chain of descriptors in freelist or in the completion list
653 * @nbuf: Buffer Address
654 * @length:
655 * @magic:
656 * @timestamp_tick:
657 * @flags: Flags to track the state of descriptor and special frame handling
658 * @id: Descriptor ID
659 * @dma_addr:
660 * @vdev_id: vdev_id of vdev over which the packet was transmitted
661 * @tx_status:
662 * @peer_id:
663 * @pdev: Handle to pdev
664 * @tx_encap_type: Transmit encap type (i.e. Raw, Native Wi-Fi, Ethernet).
665 * This is maintained in descriptor to allow more efficient
666 * processing in completion event processing code.
667 * This field is filled in with the htt_pkt_type enum.
668 * @buffer_src: buffer source TQM, REO, FW etc.
669 * @reserved:
670 * @frm_type: Frame Type - ToDo check if this is redundant
671 * @pkt_offset: Offset from which the actual packet data starts
672 * @pool_id: Pool ID - used when releasing the descriptor
673 * @msdu_ext_desc: MSDU extension descriptor
674 * @timestamp:
675 * @driver_egress_ts: driver egress timestamp
676 * @driver_ingress_ts: driver ingress timestamp
677 * @comp:
678 * @tcl_cmd_vaddr: VADDR of the TCL descriptor, valid for soft-umac arch
679 * @tcl_cmd_paddr: PADDR of the TCL descriptor, valid for soft-umac arch
680 */
681 struct dp_tx_desc_s {
682 struct dp_tx_desc_s *next;
683 qdf_nbuf_t nbuf;
684 uint16_t length;
685 #ifdef DP_TX_TRACKING
686 uint32_t magic;
687 uint64_t timestamp_tick;
688 #endif
689 uint32_t flags;
690 uint32_t id;
691 qdf_dma_addr_t dma_addr;
692 uint8_t vdev_id;
693 uint8_t tx_status;
694 uint16_t peer_id;
695 struct dp_pdev *pdev;
696 uint8_t tx_encap_type:2,
697 buffer_src:3,
698 reserved:3;
699 uint8_t frm_type;
700 uint8_t pkt_offset;
701 uint8_t pool_id;
702 struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
703 qdf_ktime_t timestamp;
704 #ifdef WLAN_FEATURE_TX_LATENCY_STATS
705 qdf_ktime_t driver_egress_ts;
706 qdf_ktime_t driver_ingress_ts;
707 #endif
708 struct hal_tx_desc_comp_s comp;
709 #ifdef WLAN_SOFTUMAC_SUPPORT
710 void *tcl_cmd_vaddr;
711 qdf_dma_addr_t tcl_cmd_paddr;
712 #endif
713 };
714
715 #ifdef QCA_AC_BASED_FLOW_CONTROL
716 /**
717 * enum flow_pool_status - flow pool status
718 * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
719 * and network queues are unpaused
720 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
721 * and network queues are paused
722 * @FLOW_POOL_BE_BK_PAUSED:
723 * @FLOW_POOL_VI_PAUSED:
724 * @FLOW_POOL_VO_PAUSED:
725 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
726 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
727 * @FLOW_POOL_ACTIVE_UNPAUSED_REATTACH: pool is reattached but network
728 * queues are not paused
729 */
730 enum flow_pool_status {
731 FLOW_POOL_ACTIVE_UNPAUSED = 0,
732 FLOW_POOL_ACTIVE_PAUSED = 1,
733 FLOW_POOL_BE_BK_PAUSED = 2,
734 FLOW_POOL_VI_PAUSED = 3,
735 FLOW_POOL_VO_PAUSED = 4,
736 FLOW_POOL_INVALID = 5,
737 FLOW_POOL_INACTIVE = 6,
738 FLOW_POOL_ACTIVE_UNPAUSED_REATTACH = 7,
739 };
740
741 #else
742 /**
743 * enum flow_pool_status - flow pool status
744 * @FLOW_POOL_ACTIVE_UNPAUSED : pool is active (can take/put descriptors)
745 * and network queues are unpaused
746 * @FLOW_POOL_ACTIVE_PAUSED: pool is active (can take/put descriptors)
747 * and network queues are paused
748 * @FLOW_POOL_BE_BK_PAUSED:
749 * @FLOW_POOL_VI_PAUSED:
750 * @FLOW_POOL_VO_PAUSED:
751 * @FLOW_POOL_INVALID: pool is invalid (put descriptor)
752 * @FLOW_POOL_INACTIVE: pool is inactive (pool is free)
753 */
754 enum flow_pool_status {
755 FLOW_POOL_ACTIVE_UNPAUSED = 0,
756 FLOW_POOL_ACTIVE_PAUSED = 1,
757 FLOW_POOL_BE_BK_PAUSED = 2,
758 FLOW_POOL_VI_PAUSED = 3,
759 FLOW_POOL_VO_PAUSED = 4,
760 FLOW_POOL_INVALID = 5,
761 FLOW_POOL_INACTIVE = 6,
762 };
763
764 #endif
765
766 /**
767 * struct dp_tx_tso_seg_pool_s
768 * @pool_size: total number of pool elements
769 * @num_free: free element count
770 * @freelist: first free element pointer
771 * @desc_pages: multiple page allocation information for actual descriptors
772 * @lock: lock for accessing the pool
773 */
774 struct dp_tx_tso_seg_pool_s {
775 uint16_t pool_size;
776 uint16_t num_free;
777 struct qdf_tso_seg_elem_t *freelist;
778 struct qdf_mem_multi_page_t desc_pages;
779 qdf_spinlock_t lock;
780 };
781
782 /**
783 * struct dp_tx_tso_num_seg_pool_s - TSO Num seg pool
784 * @num_seg_pool_size: total number of pool elements
785 * @num_free: free element count
786 * @freelist: first free element pointer
787 * @desc_pages: multiple page allocation information for actual descriptors
788 * @lock: lock for accessing the pool
789 */
790
791 struct dp_tx_tso_num_seg_pool_s {
792 uint16_t num_seg_pool_size;
793 uint16_t num_free;
794 struct qdf_tso_num_seg_elem_t *freelist;
795 struct qdf_mem_multi_page_t desc_pages;
796 /*tso mutex */
797 qdf_spinlock_t lock;
798 };
799
800 /**
801 * struct dp_tx_desc_pool_s - Tx Descriptor pool information
802 * @elem_size: Size of each descriptor in the pool
803 * @num_allocated: Number of used descriptors
804 * @freelist: Chain of free descriptors
805 * @desc_pages: multiple page allocation information for actual descriptors
806 * @pool_size: Total number of descriptors in the pool
807 * @flow_pool_id:
808 * @num_invalid_bin: Deleted pool with pending Tx completions.
809 * @avail_desc:
810 * @status:
811 * @flow_type:
812 * @stop_th:
813 * @start_th:
814 * @max_pause_time:
815 * @latest_pause_time:
816 * @pkt_drop_no_desc:
817 * @flow_pool_lock:
818 * @pool_create_cnt:
819 * @pool_owner_ctx:
820 * @elem_count:
821 * @num_free: Number of free descriptors
822 * @lock: Lock for descriptor allocation/free from/to the pool
823 */
824 struct dp_tx_desc_pool_s {
825 uint16_t elem_size;
826 uint32_t num_allocated;
827 struct dp_tx_desc_s *freelist;
828 struct qdf_mem_multi_page_t desc_pages;
829 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
830 uint16_t pool_size;
831 uint8_t flow_pool_id;
832 uint8_t num_invalid_bin;
833 uint16_t avail_desc;
834 enum flow_pool_status status;
835 enum htt_flow_type flow_type;
836 #ifdef QCA_AC_BASED_FLOW_CONTROL
837 uint16_t stop_th[FL_TH_MAX];
838 uint16_t start_th[FL_TH_MAX];
839 qdf_time_t max_pause_time[FL_TH_MAX];
840 qdf_time_t latest_pause_time[FL_TH_MAX];
841 #else
842 uint16_t stop_th;
843 uint16_t start_th;
844 #endif
845 uint16_t pkt_drop_no_desc;
846 qdf_spinlock_t flow_pool_lock;
847 uint8_t pool_create_cnt;
848 void *pool_owner_ctx;
849 #else
850 uint16_t elem_count;
851 uint32_t num_free;
852 qdf_spinlock_t lock;
853 #endif
854 };
855
856 /**
857 * struct dp_txrx_pool_stats - flow pool related statistics
858 * @pool_map_count: flow pool map received
859 * @pool_unmap_count: flow pool unmap received
860 * @pkt_drop_no_pool: packets dropped due to unavailability of pool
861 */
862 struct dp_txrx_pool_stats {
863 uint16_t pool_map_count;
864 uint16_t pool_unmap_count;
865 uint16_t pkt_drop_no_pool;
866 };
867
868 /**
869 * struct dp_srng - DP srng structure
870 * @hal_srng: hal_srng handle
871 * @base_vaddr_unaligned: un-aligned virtual base address of the srng ring
872 * @base_vaddr_aligned: aligned virtual base address of the srng ring
873 * @base_paddr_unaligned: un-aligned physical base address of the srng ring
874 * @base_paddr_aligned: aligned physical base address of the srng ring
875 * @alloc_size: size of the srng ring
876 * @cached: is the srng ring memory cached or un-cached memory
877 * @irq: irq number of the srng ring
878 * @num_entries: number of entries in the srng ring
879 * @stats: Structure to track the ring utilization stats
880 * @is_mem_prealloc: Is this srng memory pre-allocated
881 * @crit_thresh: Critical threshold for near-full processing of this srng
882 * @safe_thresh: Safe threshold for near-full processing of this srng
883 * @near_full: Flag to indicate srng is near-full
884 */
885 struct dp_srng {
886 hal_ring_handle_t hal_srng;
887 void *base_vaddr_unaligned;
888 void *base_vaddr_aligned;
889 qdf_dma_addr_t base_paddr_unaligned;
890 qdf_dma_addr_t base_paddr_aligned;
891 uint32_t alloc_size;
892 uint8_t cached;
893 int irq;
894 uint32_t num_entries;
895 struct ring_util_stats stats;
896 #ifdef DP_MEM_PRE_ALLOC
897 uint8_t is_mem_prealloc;
898 #endif
899 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ
900 uint16_t crit_thresh;
901 uint16_t safe_thresh;
902 qdf_atomic_t near_full;
903 #endif
904 };
905
906 struct dp_rx_reorder_array_elem {
907 qdf_nbuf_t head;
908 qdf_nbuf_t tail;
909 };
910
911 #define DP_RX_BA_INACTIVE 0
912 #define DP_RX_BA_ACTIVE 1
913 #define DP_RX_BA_IN_PROGRESS 2
914 struct dp_reo_cmd_info {
915 uint16_t cmd;
916 enum hal_reo_cmd_type cmd_type;
917 void *data;
918 void (*handler)(struct dp_soc *, void *, union hal_reo_status *);
919 TAILQ_ENTRY(dp_reo_cmd_info) reo_cmd_list_elem;
920 };
921
922 struct dp_peer_delay_stats {
923 struct cdp_delay_tid_stats delay_tid_stats[CDP_MAX_DATA_TIDS]
924 [CDP_MAX_TXRX_CTX];
925 };
926
927 /* Rx TID defrag*/
928 struct dp_rx_tid_defrag {
929 /* TID */
930 int tid;
931
932 /* only used for defrag right now */
933 TAILQ_ENTRY(dp_rx_tid_defrag) defrag_waitlist_elem;
934
935 /* Store dst desc for reinjection */
936 hal_ring_desc_t dst_ring_desc;
937 struct dp_rx_desc *head_frag_desc;
938
939 /* Sequence and fragments that are being processed currently */
940 uint32_t curr_seq_num;
941 uint32_t curr_frag_num;
942
943 /* TODO: Check the following while adding defragmentation support */
944 struct dp_rx_reorder_array_elem *array;
945 /* base - single rx reorder element used for non-aggr cases */
946 struct dp_rx_reorder_array_elem base;
947 /* rx_tid lock */
948 qdf_spinlock_t defrag_tid_lock;
949
950 /* head PN number */
951 uint64_t pn128[2];
952
953 uint32_t defrag_timeout_ms;
954
955 /* defrag usage only, dp_txrx_peer pointer related to this tid */
956 struct dp_txrx_peer *defrag_peer;
957 };
958
959 /* Rx TID */
960 struct dp_rx_tid {
961 /* TID */
962 int tid;
963
964 /* Num of addba requests */
965 uint32_t num_of_addba_req;
966
967 /* Num of addba responses */
968 uint32_t num_of_addba_resp;
969
970 /* Num of delba requests */
971 uint32_t num_of_delba_req;
972
973 /* Num of addba responses successful */
974 uint32_t num_addba_rsp_success;
975
976 /* Num of addba responses failed */
977 uint32_t num_addba_rsp_failed;
978
979 /* pn size */
980 uint8_t pn_size;
981 /* REO TID queue descriptors */
982 void *hw_qdesc_vaddr_unaligned;
983 void *hw_qdesc_vaddr_aligned;
984 qdf_dma_addr_t hw_qdesc_paddr_unaligned;
985 qdf_dma_addr_t hw_qdesc_paddr;
986 uint32_t hw_qdesc_alloc_size;
987
988 /* RX ADDBA session state */
989 int ba_status;
990
991 /* RX BA window size */
992 uint16_t ba_win_size;
993
994 /* Starting sequence number in Addba request */
995 uint16_t startseqnum;
996 uint16_t dialogtoken;
997 uint16_t statuscode;
998 /* user defined ADDBA response status code */
999 uint16_t userstatuscode;
1000
1001 /* rx_tid lock */
1002 qdf_spinlock_t tid_lock;
1003
1004 /* Store ppdu_id when 2k exception is received */
1005 uint32_t ppdu_id_2k;
1006
1007 /* Delba Tx completion status */
1008 uint8_t delba_tx_status;
1009
1010 /* Delba Tx retry count */
1011 uint8_t delba_tx_retry;
1012
1013 /* Delba stats */
1014 uint32_t delba_tx_success_cnt;
1015 uint32_t delba_tx_fail_cnt;
1016
1017 /* Delba reason code for retries */
1018 uint8_t delba_rcode;
1019
1020 /* Coex override preserved window size (1-based) */
1021 uint16_t rx_ba_win_size_override;
1022 #ifdef IPA_OFFLOAD
1023 /* rx msdu count per tid */
1024 struct cdp_pkt_info rx_msdu_cnt;
1025 #endif
1026
1027 };
1028
1029 /**
1030 * struct dp_intr_stats - DP Interrupt Stats for an interrupt context
1031 * @num_tx_ring_masks: interrupts with tx_ring_mask set
1032 * @num_rx_ring_masks: interrupts with rx_ring_mask set
1033 * @num_rx_mon_ring_masks: interrupts with rx_mon_ring_mask set
1034 * @num_rx_err_ring_masks: interrupts with rx_err_ring_mask set
1035 * @num_rx_wbm_rel_ring_masks: interrupts with rx_wbm_rel_ring_mask set
1036 * @num_reo_status_ring_masks: interrupts with reo_status_ring_mask set
1037 * @num_rxdma2host_ring_masks: interrupts with rxdma2host_ring_mask set
1038 * @num_host2rxdma_ring_masks: interrupts with host2rxdma_ring_mask set
1039 * @num_host2rxdma_mon_ring_masks: interrupts with host2rxdma_mon_ring_mask set
1040 * @num_rx_ring_near_full_masks: Near-full interrupts for REO DST ring
1041 * @num_tx_comp_ring_near_full_masks: Near-full interrupts for TX completion
1042 * @num_rx_wbm_rel_ring_near_full_masks: total number of times the wbm rel ring
1043 * near full interrupt was received
1044 * @num_reo_status_ring_near_full_masks: total number of times the reo status
1045 * near full interrupt was received
1046 * @num_near_full_masks: total number of times the near full interrupt
1047 * was received
1048 * @num_masks: total number of times the interrupt was received
1049 * @num_host2txmon_ring__masks: interrupts with host2txmon_ring_mask set
1050 * @num_tx_mon_ring_masks: interrupts with tx_mon_ring_mask set
1053 *
1054 * Counter for individual masks are incremented only if there are any packets
1055 * on that ring.
1056 */
1057 struct dp_intr_stats {
1058 uint32_t num_tx_ring_masks[MAX_TCL_DATA_RINGS];
1059 uint32_t num_rx_ring_masks[MAX_REO_DEST_RINGS];
1060 uint32_t num_rx_mon_ring_masks;
1061 uint32_t num_rx_err_ring_masks;
1062 uint32_t num_rx_wbm_rel_ring_masks;
1063 uint32_t num_reo_status_ring_masks;
1064 uint32_t num_rxdma2host_ring_masks;
1065 uint32_t num_host2rxdma_ring_masks;
1066 uint32_t num_host2rxdma_mon_ring_masks;
1067 uint32_t num_rx_ring_near_full_masks[MAX_REO_DEST_RINGS];
1068 uint32_t num_tx_comp_ring_near_full_masks[MAX_TCL_DATA_RINGS];
1069 uint32_t num_rx_wbm_rel_ring_near_full_masks;
1070 uint32_t num_reo_status_ring_near_full_masks;
1071 uint32_t num_host2txmon_ring__masks;
1072 uint32_t num_near_full_masks;
1073 uint32_t num_masks;
1074 uint32_t num_tx_mon_ring_masks;
1075 };
1076
1077 #ifdef DP_UMAC_HW_RESET_SUPPORT
1078 /**
1079 * struct dp_intr_bkp - DP per interrupt context ring masks old state
1080 * @tx_ring_mask: WBM Tx completion rings (0-2) associated with this napi ctxt
1081 * @rx_ring_mask: Rx REO rings (0-3) associated with this interrupt context
1082 * @rx_mon_ring_mask: Rx monitor ring mask (0-2)
1083 * @rx_err_ring_mask: REO Exception Ring
1084 * @rx_wbm_rel_ring_mask: WBM2SW Rx Release Ring
1085 * @reo_status_ring_mask: REO command response ring
1086 * @rxdma2host_ring_mask: RXDMA to host destination ring
1087 * @host2rxdma_ring_mask: Host to RXDMA buffer ring
1088 * @host2rxdma_mon_ring_mask: Host to RXDMA monitor buffer ring
1089 * @host2txmon_ring_mask: Tx monitor buffer ring
1090 * @tx_mon_ring_mask: Tx monitor ring mask (0-2)
1091 *
1092 */
1093 struct dp_intr_bkp {
1094 uint8_t tx_ring_mask;
1095 uint8_t rx_ring_mask;
1096 uint8_t rx_mon_ring_mask;
1097 uint8_t rx_err_ring_mask;
1098 uint8_t rx_wbm_rel_ring_mask;
1099 uint8_t reo_status_ring_mask;
1100 uint8_t rxdma2host_ring_mask;
1101 uint8_t host2rxdma_ring_mask;
1102 uint8_t host2rxdma_mon_ring_mask;
1103 uint8_t host2txmon_ring_mask;
1104 uint8_t tx_mon_ring_mask;
1105 };
1106 #endif
1107
1108 /* per interrupt context */
1109 struct dp_intr {
1110 uint8_t tx_ring_mask; /* WBM Tx completion rings (0-2)
1111 associated with this napi context */
1112 uint8_t rx_ring_mask; /* Rx REO rings (0-3) associated
1113 with this interrupt context */
1114 uint8_t rx_mon_ring_mask; /* Rx monitor ring mask (0-2) */
1115 uint8_t rx_err_ring_mask; /* REO Exception Ring */
1116 uint8_t rx_wbm_rel_ring_mask; /* WBM2SW Rx Release Ring */
1117 uint8_t reo_status_ring_mask; /* REO command response ring */
1118 uint8_t rxdma2host_ring_mask; /* RXDMA to host destination ring */
1119 uint8_t host2rxdma_ring_mask; /* Host to RXDMA buffer ring */
1120 /* Host to RXDMA monitor buffer ring */
1121 uint8_t host2rxdma_mon_ring_mask;
1122 /* RX REO rings near full interrupt mask */
1123 uint8_t rx_near_full_grp_1_mask;
1124 /* RX REO rings near full interrupt mask */
1125 uint8_t rx_near_full_grp_2_mask;
1126 /* WBM TX completion rings near full interrupt mask */
1127 uint8_t tx_ring_near_full_mask;
1128 uint8_t host2txmon_ring_mask; /* Tx monitor buffer ring */
1129 uint8_t tx_mon_ring_mask; /* Tx monitor ring mask (0-2) */
1130 struct dp_soc *soc; /* Reference to SoC structure ,
1131 to get DMA ring handles */
1132 qdf_lro_ctx_t lro_ctx;
1133 uint8_t dp_intr_id;
1134
1135 /* Interrupt Stats for individual masks */
1136 struct dp_intr_stats intr_stats;
1137 uint8_t umac_reset_intr_mask; /* UMAC reset interrupt mask */
1138 };
1139
1140 #define REO_DESC_FREELIST_SIZE 64
1141 #define REO_DESC_FREE_DEFER_MS 1000
1142 struct reo_desc_list_node {
1143 qdf_list_node_t node;
1144 unsigned long free_ts;
1145 struct dp_rx_tid rx_tid;
1146 bool resend_update_reo_cmd;
1147 uint32_t pending_ext_desc_size;
1148 #ifdef REO_QDESC_HISTORY
1149 uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1150 #endif
1151 };
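
/*
 * Illustrative age check on a freelist node before its REO queue memory is
 * released (a minimal sketch of the deferred-free idea; 'curr_ts' is a
 * hypothetical millisecond timestamp):
 *
 *	if ((curr_ts - desc->free_ts) > REO_DESC_FREE_DEFER_MS)
 *		... // node is old enough, safe to free rx_tid queue memory
 */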
1152
1153 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
1154 #define REO_DESC_DEFERRED_FREELIST_SIZE 256
1155 #define REO_DESC_DEFERRED_FREE_MS 30000
1156
1157 struct reo_desc_deferred_freelist_node {
1158 qdf_list_node_t node;
1159 unsigned long free_ts;
1160 void *hw_qdesc_vaddr_unaligned;
1161 qdf_dma_addr_t hw_qdesc_paddr;
1162 uint32_t hw_qdesc_alloc_size;
1163 #ifdef REO_QDESC_HISTORY
1164 uint8_t peer_mac[QDF_MAC_ADDR_SIZE];
1165 #endif /* REO_QDESC_HISTORY */
1166 };
1167 #endif /* WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY */
1168
1169 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1170 /**
1171 * struct reo_cmd_event_record: Elements to record for each reo command
1172 * @cmd_type: reo command type
1173 * @cmd_return_status: reo command post status
1174 * @timestamp: record timestamp for the reo command
1175 */
1176 struct reo_cmd_event_record {
1177 enum hal_reo_cmd_type cmd_type;
1178 uint8_t cmd_return_status;
1179 uint64_t timestamp;
1180 };
1181
1182 /**
1183 * struct reo_cmd_event_history: Account for reo cmd events
1184 * @index: record number
1185 * @cmd_record: list of records
1186 */
1187 struct reo_cmd_event_history {
1188 qdf_atomic_t index;
1189 struct reo_cmd_event_record cmd_record[REO_CMD_EVENT_HIST_MAX];
1190 };
1191 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1192
1193 /**
1194 * struct htt_t2h_msg_stats: HTT T2H message stats
1195 * @peer_map: Peer map event count
1196 * @peer_unmap: Peer unmap event count (peer_unmap -= ml_peer_unmap)
1197 * @invalid_peer_unmap: Peer unmap with invalid peer id
1198 * @ml_peer_map: MLD peer map count
1199 * @ml_peer_unmap: MLD peer unmap count
1200 */
1201 struct htt_t2h_msg_stats {
1202 uint32_t peer_map;
1203 uint32_t peer_unmap;
1204 uint32_t invalid_peer_unmap;
1205 uint32_t ml_peer_map;
1206 uint32_t ml_peer_unmap;
1207 };
1208
1209 /* SoC level data path statistics */
1210 struct dp_soc_stats {
1211 struct {
1212 uint32_t added;
1213 uint32_t deleted;
1214 uint32_t aged_out;
1215 uint32_t map_err;
1216 uint32_t ast_mismatch;
1217 } ast;
1218
1219 struct {
1220 uint32_t added;
1221 uint32_t deleted;
1222 } mec;
1223
1224 /* SOC level TX stats */
1225 struct {
1226 /* Total packets transmitted */
1227 struct cdp_pkt_info egress[MAX_TCL_DATA_RINGS];
1228 /* Enqueues per tcl ring */
1229 uint32_t tcl_enq[MAX_TCL_DATA_RINGS];
1230 /* packets dropped on tx because of no peer */
1231 struct cdp_pkt_info tx_invalid_peer;
1232 /* descriptors in each tcl ring */
1233 uint32_t tcl_ring_full[MAX_TCL_DATA_RINGS];
1234 /* Descriptors in use at soc */
1235 uint32_t desc_in_use;
1236 /* tqm_release_reason == FW removed */
1237 uint32_t dropped_fw_removed;
1238 /* tx completion release_src != TQM or FW */
1239 uint32_t invalid_release_source;
1240 /* TX descriptor from completion ring Desc is not valid */
1241 uint32_t invalid_tx_comp_desc;
1242 /* tx completion wbm_internal_error */
1243 uint32_t wbm_internal_error[MAX_WBM_INT_ERROR_REASONS];
1244 /* tx completion non_wbm_internal_error */
1245 uint32_t non_wbm_internal_err;
1246 /* TX Comp loop packet limit hit */
1247 uint32_t tx_comp_loop_pkt_limit_hit;
1248 /* Head pointer Out of sync at the end of dp_tx_comp_handler */
1249 uint32_t hp_oos2;
1250 /* tx desc freed as part of vdev detach */
1251 uint32_t tx_comp_exception;
1252 /* TQM drops after/during peer delete */
1253 uint64_t tqm_drop_no_peer;
1254 /* Number of tx completions reaped per WBM2SW release ring */
1255 uint32_t tx_comp[MAX_TCL_DATA_RINGS];
1256 /* Number of tx completions force freed */
1257 uint32_t tx_comp_force_freed;
1258 /* Tx completion ring near full */
1259 uint32_t near_full;
1260 /* Tx drops with buffer src as HAL_TX_COMP_RELEASE_SOURCE_FW */
1261 uint32_t fw2wbm_tx_drop;
1262 } tx;
1263
1264 /* SOC level RX stats */
1265 struct {
1266 /* Total rx packets count */
1267 struct cdp_pkt_info ingress;
1268 /* Rx errors */
1269 /* Total Packets in Rx Error ring */
1270 uint32_t err_ring_pkts;
1271 /* No of Fragments */
1272 uint32_t rx_frags;
1273 /* No of incomplete fragments in waitlist */
1274 uint32_t rx_frag_wait;
1275 /* Fragments dropped due to errors */
1276 uint32_t rx_frag_err;
1277 /* Fragments received OOR causing sequence num mismatch */
1278 uint32_t rx_frag_oor;
1279 /* Fragments dropped due to len errors in skb */
1280 uint32_t rx_frag_err_len_error;
1281 /* Fragments dropped due to no peer found */
1282 uint32_t rx_frag_err_no_peer;
1283 /* No of reinjected packets */
1284 uint32_t reo_reinject;
1285 /* Reap loop packet limit hit */
1286 uint32_t reap_loop_pkt_limit_hit;
1287 /* Head pointer Out of sync at the end of dp_rx_process */
1288 uint32_t hp_oos2;
1289 /* Rx ring near full */
1290 uint32_t near_full;
1291 /* Break ring reaping as not all scattered msdu received */
1292 uint32_t msdu_scatter_wait_break;
1293 /* Number of bar frames received */
1294 uint32_t bar_frame;
1295 /* Number of frames routed from rxdma */
1296 uint32_t rxdma2rel_route_drop;
1297 /* Number of frames routed from reo */
1298 uint32_t reo2rel_route_drop;
1299 uint64_t fast_recycled;
1300 /* Number of hw stats requested */
1301 uint32_t rx_hw_stats_requested;
1302 /* Number of hw stats request timeout */
1303 uint32_t rx_hw_stats_timeout;
1304
1305 struct {
1306 /* Invalid RBM error count */
1307 uint32_t invalid_rbm;
1308 /* Invalid VDEV Error count */
1309 uint32_t invalid_vdev;
1310 /* Invalid PDEV error count */
1311 uint32_t invalid_pdev;
1312
1313 /* Packets delivered to stack that have no related peer */
1314 uint32_t pkt_delivered_no_peer;
1315 /* Defrag peer uninit error count */
1316 uint32_t defrag_peer_uninit;
1317 /* Invalid sa_idx or da_idx*/
1318 uint32_t invalid_sa_da_idx;
1319 /* MSDU DONE failures */
1320 uint32_t msdu_done_fail;
1321 /* Invalid PEER Error count */
1322 struct cdp_pkt_info rx_invalid_peer;
1323 /* Invalid PEER ID count */
1324 struct cdp_pkt_info rx_invalid_peer_id;
1325 /* Invalid packet length */
1326 struct cdp_pkt_info rx_invalid_pkt_len;
1327 /* HAL ring access Fail error count */
1328 uint32_t hal_ring_access_fail;
1329 /* HAL ring access full Fail error count */
1330 uint32_t hal_ring_access_full_fail;
1331 /* RX DMA error count */
1332 uint32_t rxdma_error[HAL_RXDMA_ERR_MAX];
1333 /* RX REO DEST Desc Invalid Magic count */
1334 uint32_t rx_desc_invalid_magic;
1335 /* REO Error count */
1336 uint32_t reo_error[HAL_REO_ERR_MAX];
1337 /* HAL REO ERR Count */
1338 uint32_t hal_reo_error[MAX_REO_DEST_RINGS];
1339 /* HAL REO DEST Duplicate count */
1340 uint32_t hal_reo_dest_dup;
1341 /* HAL WBM RELEASE Duplicate count */
1342 uint32_t hal_wbm_rel_dup;
1343 /* HAL RXDMA error Duplicate count */
1344 uint32_t hal_rxdma_err_dup;
1345 /* ipa smmu map duplicate count */
1346 uint32_t ipa_smmu_map_dup;
1347 /* ipa smmu unmap duplicate count */
1348 uint32_t ipa_smmu_unmap_dup;
1349 /* ipa smmu unmap while ipa pipes are disabled */
1350 uint32_t ipa_unmap_no_pipe;
1351 /* REO cmd send fail/requeue count */
1352 uint32_t reo_cmd_send_fail;
1353 /* REO cmd send drain count */
1354 uint32_t reo_cmd_send_drain;
1355 /* RX msdu drop count due to scatter */
1356 uint32_t scatter_msdu;
1357 /* RX msdu drop count due to invalid cookie */
1358 uint32_t invalid_cookie;
1359 /* Count of stale cookie read in RX path */
1360 uint32_t stale_cookie;
1361 /* Delba sent count due to RX 2k jump */
1362 uint32_t rx_2k_jump_delba_sent;
1363 /* RX 2k jump msdu indicated to stack count */
1364 uint32_t rx_2k_jump_to_stack;
1365 /* RX 2k jump msdu dropped count */
1366 uint32_t rx_2k_jump_drop;
1367 /* REO ERR msdu buffer received */
1368 uint32_t reo_err_msdu_buf_rcved;
1369 /* REO ERR msdu buffer with invalid cookie received */
1370 uint32_t reo_err_msdu_buf_invalid_cookie;
1371 /* REO OOR msdu drop count */
1372 uint32_t reo_err_oor_drop;
1373 /* REO OOR msdu indicated to stack count */
1374 uint32_t reo_err_oor_to_stack;
1375 /* REO OOR scattered msdu count */
1376 uint32_t reo_err_oor_sg_count;
1377 /* RX msdu rejected count on delivery to vdev stack_fn*/
1378 uint32_t rejected;
1379 /* Incorrect msdu count in MPDU desc info */
1380 uint32_t msdu_count_mismatch;
1381 /* RX raw frame dropped count */
1382 uint32_t raw_frm_drop;
1383 /* Stale link desc cookie count*/
1384 uint32_t invalid_link_cookie;
1385 /* Nbuf sanity failure */
1386 uint32_t nbuf_sanity_fail;
1387 /* Duplicate link desc refilled */
1388 uint32_t dup_refill_link_desc;
1389 /* Incorrect msdu continuation bit in MSDU desc */
1390 uint32_t msdu_continuation_err;
1391 /* count of start sequence (ssn) updates */
1392 uint32_t ssn_update_count;
1393 /* count of bar handling fail */
1394 uint32_t bar_handle_fail_count;
1395 /* EAPOL drop count in intrabss scenario */
1396 uint32_t intrabss_eapol_drop;
1397 /* PN check failed for 2K-jump or OOR error */
1398 uint32_t pn_in_dest_check_fail;
1399 /* MSDU len err count */
1400 uint32_t msdu_len_err;
1401 /* Rx flush count */
1402 uint32_t rx_flush_count;
1403 /* Rx invalid tid count */
1404 uint32_t rx_invalid_tid_err;
1405 /* Invalid address1 in defrag path*/
1406 uint32_t defrag_ad1_invalid;
1407 /* decrypt error drop */
1408 uint32_t decrypt_err_drop;
1409 #ifdef GLOBAL_ASSERT_AVOIDANCE
1410 /* rx_desc NULL war count*/
1411 uint32_t rx_desc_null;
1412 /* wbm err invalid release buffer type */
1413 uint32_t wbm_err_buf_rel_type;
1414 /* Reo entry rx desc null */
1415 uint32_t reo_err_rx_desc_null;
1416 /* Invalid chip id received in intrabss path */
1417 uint64_t intra_bss_bad_chipid;
1418 #endif
1419 /* HP Out of sync at the end of dp_rx_err_process */
1420 uint32_t hp_oos2;
1421 /* Rx exception ring near full */
1422 uint32_t near_full;
1423 } err;
1424
1425 /* packet count per core - per ring */
1426 uint64_t ring_packets[NR_CPUS][MAX_REO_DEST_RINGS];
1427 } rx;
1428
1429 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
1430 struct reo_cmd_event_history cmd_event_history;
1431 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
1432 struct htt_t2h_msg_stats t2h_msg_stats;
1433 };
1434
1435 union dp_align_mac_addr {
1436 uint8_t raw[QDF_MAC_ADDR_SIZE];
1437 struct {
1438 uint16_t bytes_ab;
1439 uint16_t bytes_cd;
1440 uint16_t bytes_ef;
1441 } align2;
1442 struct {
1443 uint32_t bytes_abcd;
1444 uint16_t bytes_ef;
1445 } align4;
1446 struct __attribute__((__packed__)) {
1447 uint16_t bytes_ab;
1448 uint32_t bytes_cdef;
1449 } align4_2;
1450 };
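
/*
 * Illustrative aligned MAC address comparison using the union above (a
 * minimal sketch; 'addr1' and 'addr2' are hypothetical pointers to
 * union dp_align_mac_addr):
 *
 *	if (addr1->align4.bytes_abcd == addr2->align4.bytes_abcd &&
 *	    addr1->align4.bytes_ef == addr2->align4.bytes_ef)
 *		... // MAC addresses match
 */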
1451
1452 /**
1453 * struct dp_ast_free_cb_params - HMWDS free callback cookie
1454 * @mac_addr: ast mac address
1455 * @peer_mac_addr: mac address of peer
1456 * @type: ast entry type
1457 * @vdev_id: vdev_id
1458 * @flags: ast flags
1459 */
1460 struct dp_ast_free_cb_params {
1461 union dp_align_mac_addr mac_addr;
1462 union dp_align_mac_addr peer_mac_addr;
1463 enum cdp_txrx_ast_entry_type type;
1464 uint8_t vdev_id;
1465 uint32_t flags;
1466 };
1467
1468 /**
1469 * struct dp_ast_entry - AST entry
1470 *
1471 * @ast_idx: Hardware AST Index
1472 * @peer_id: Next Hop peer_id (for non-WDS nodes, this will be point to
1473 * associated peer with this MAC address)
1474 * @mac_addr: MAC Address for this AST entry
1475 * @next_hop: Set to 1 if this is for a WDS node
1476 * @is_active: flag to indicate active data traffic on this node
1477 * (used for aging out/expiry)
1478 * @ase_list_elem: node in peer AST list
1479 * @is_bss: flag to indicate if entry corresponds to bss peer
1480 * @is_mapped: flag to indicate that we have mapped the AST entry
1481 * in ast_table
1482 * @pdev_id: pdev ID
1483 * @vdev_id: vdev ID
1484 * @ast_hash_value: hash value in HW
1485 * @ref_cnt: reference count
1486 * @type: flag to indicate type of the entry(static/WDS/MEC)
1487 * @delete_in_progress: Flag to indicate that a delete command has been sent to
1488 * FW and the host is waiting for a response from FW
1489 * @callback: ast free/unmap callback
1490 * @cookie: argument to callback
1491 * @hash_list_elem: node in soc AST hash list (mac address used as hash)
1492 */
1493 struct dp_ast_entry {
1494 uint16_t ast_idx;
1495 uint16_t peer_id;
1496 union dp_align_mac_addr mac_addr;
1497 bool next_hop;
1498 bool is_active;
1499 bool is_mapped;
1500 uint8_t pdev_id;
1501 uint8_t vdev_id;
1502 uint16_t ast_hash_value;
1503 qdf_atomic_t ref_cnt;
1504 enum cdp_txrx_ast_entry_type type;
1505 bool delete_in_progress;
1506 txrx_ast_free_cb callback;
1507 void *cookie;
1508 TAILQ_ENTRY(dp_ast_entry) ase_list_elem;
1509 TAILQ_ENTRY(dp_ast_entry) hash_list_elem;
1510 };
1511
1512 /**
1513 * struct dp_mec_entry - MEC entry
1514 *
1515 * @mac_addr: MAC Address for this MEC entry
1516 * @is_active: flag to indicate active data traffic on this node
1517 * (used for aging out/expiry)
1518 * @pdev_id: pdev ID
1519 * @vdev_id: vdev ID
1520 * @hash_list_elem: node in soc MEC hash list (mac address used as hash)
1521 */
1522 struct dp_mec_entry {
1523 union dp_align_mac_addr mac_addr;
1524 bool is_active;
1525 uint8_t pdev_id;
1526 uint8_t vdev_id;
1527
1528 TAILQ_ENTRY(dp_mec_entry) hash_list_elem;
1529 };
1530
1531 /* SOC level htt stats */
1532 struct htt_t2h_stats {
1533 /* lock to protect htt_stats_msg update */
1534 qdf_spinlock_t lock;
1535
1536 /* work queue to process htt stats */
1537 qdf_work_t work;
1538
1539 /* T2H Ext stats message queue */
1540 qdf_nbuf_queue_t msg;
1541
1542 /* number of completed stats in htt_stats_msg */
1543 uint32_t num_stats;
1544 };
1545
1546 struct link_desc_bank {
1547 void *base_vaddr_unaligned;
1548 void *base_vaddr;
1549 qdf_dma_addr_t base_paddr_unaligned;
1550 qdf_dma_addr_t base_paddr;
1551 uint32_t size;
1552 };
1553
1554 struct rx_buff_pool {
1555 qdf_nbuf_queue_head_t emerg_nbuf_q;
1556 uint32_t nbuf_fail_cnt;
1557 bool is_initialized;
1558 };
1559
1560 struct rx_refill_buff_pool {
1561 bool is_initialized;
1562 uint16_t head;
1563 uint16_t tail;
1564 struct dp_pdev *dp_pdev;
1565 uint16_t max_bufq_len;
1566 qdf_nbuf_t *buf_elem;
1567 };
1568
1569 #ifdef DP_TX_HW_DESC_HISTORY
1570 #define DP_TX_HW_DESC_HIST_MAX 6144
1571 #define DP_TX_HW_DESC_HIST_PER_SLOT_MAX 2048
1572 #define DP_TX_HW_DESC_HIST_MAX_SLOTS 3
1573 #define DP_TX_HW_DESC_HIST_SLOT_SHIFT 11
1574
1575 struct dp_tx_hw_desc_evt {
1576 uint8_t tcl_desc[HAL_TX_DESC_LEN_BYTES];
1577 uint8_t tcl_ring_id;
1578 uint64_t posted;
1579 uint32_t hp;
1580 uint32_t tp;
1581 };
1582
1583 /* struct dp_tx_hw_desc_history - TX HW desc history
1584 * @index: Index where the last entry is written
1585 * @entry: history entries
1586 */
1587 struct dp_tx_hw_desc_history {
1588 qdf_atomic_t index;
1589 uint16_t num_entries_per_slot;
1590 uint16_t allocated;
1591 struct dp_tx_hw_desc_evt *entry[DP_TX_HW_DESC_HIST_MAX_SLOTS];
1592 };
1593 #endif
1594
1595 /**
1596 * enum dp_mon_status_process_event - Events for monitor status buffer record
1597 * @DP_MON_STATUS_BUF_REAP: Monitor status buffer is reaped from ring
1598 * @DP_MON_STATUS_BUF_ENQUEUE: Status buffer is enqueued to local queue
1599 * @DP_MON_STATUS_BUF_DEQUEUE: Status buffer is dequeued from local queue
1600 */
1601 enum dp_mon_status_process_event {
1602 DP_MON_STATUS_BUF_REAP,
1603 DP_MON_STATUS_BUF_ENQUEUE,
1604 DP_MON_STATUS_BUF_DEQUEUE,
1605 };
1606
1607 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
1608 #define DP_MON_STATUS_HIST_MAX 2048
1609
1610 /**
1611 * struct dp_mon_stat_info_record - monitor stat ring buffer info
1612 * @hbi: HW ring buffer info
1613 * @timestamp: timestamp when this entry was recorded
1614 * @event: event
1615 * @rx_desc: RX descriptor corresponding to the received buffer
1616 * @nbuf: buffer attached to rx_desc, if event is REAP, else the buffer
1617 * which was enqueued or dequeued.
1618 * @rx_desc_nbuf_data: nbuf data pointer.
1619 */
1620 struct dp_mon_stat_info_record {
1621 struct hal_buf_info hbi;
1622 uint64_t timestamp;
1623 enum dp_mon_status_process_event event;
1624 void *rx_desc;
1625 qdf_nbuf_t nbuf;
1626 uint8_t *rx_desc_nbuf_data;
1627 };
1628
1629 /* struct dp_mon_status_ring_history - monitor status ring history
1630 * @index: Index where the last entry is written
1631 * @entry: history entries
1632 */
1633 struct dp_mon_status_ring_history {
1634 qdf_atomic_t index;
1635 struct dp_mon_stat_info_record entry[DP_MON_STATUS_HIST_MAX];
1636 };
1637 #endif
1638
1639 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
1640 /*
1641 * The logic for getting the current index of these histories depends on
1642 * the size being a power of 2.
1643 */
1644 #define DP_RX_HIST_MAX 2048
1645 #define DP_RX_ERR_HIST_MAX 2048
1646 #define DP_RX_REINJECT_HIST_MAX 1024
1647 #define DP_RX_REFILL_HIST_MAX 2048
1648
1649 QDF_COMPILE_TIME_ASSERT(rx_history_size,
1650 (DP_RX_HIST_MAX &
1651 (DP_RX_HIST_MAX - 1)) == 0);
1652 QDF_COMPILE_TIME_ASSERT(rx_err_history_size,
1653 (DP_RX_ERR_HIST_MAX &
1654 (DP_RX_ERR_HIST_MAX - 1)) == 0);
1655 QDF_COMPILE_TIME_ASSERT(rx_reinject_history_size,
1656 (DP_RX_REINJECT_HIST_MAX &
1657 (DP_RX_REINJECT_HIST_MAX - 1)) == 0);
1658 QDF_COMPILE_TIME_ASSERT(rx_refill_history_size,
1659 (DP_RX_REFILL_HIST_MAX &
1660 (DP_RX_REFILL_HIST_MAX - 1)) == 0);
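
/*
 * Illustrative history-index update that relies on the power-of-2 sizes
 * asserted above (a minimal sketch; 'history' is a hypothetical
 * struct dp_rx_history pointer and 'hbi' a filled hal_buf_info):
 *
 *	uint32_t idx = qdf_atomic_inc_return(&history->index) &
 *			(DP_RX_HIST_MAX - 1);
 *
 *	history->entry[idx].hbi = hbi;
 *	history->entry[idx].timestamp = qdf_get_log_timestamp();
 */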
1661
1662
1663 /**
1664 * struct dp_buf_info_record - ring buffer info
1665 * @hbi: HW ring buffer info
1666 * @timestamp: timestamp when this entry was recorded
1667 */
1668 struct dp_buf_info_record {
1669 struct hal_buf_info hbi;
1670 uint64_t timestamp;
1671 };
1672
1673 /**
1674 * struct dp_refill_info_record - ring refill buffer info
1675 * @hp: HP value after refill
1676 * @tp: cached tail value during refill
1677 * @num_req: number of buffers requested to refill
1678 * @num_refill: number of buffers refilled to ring
1679 * @timestamp: timestamp when this entry was recorded
1680 */
1681 struct dp_refill_info_record {
1682 uint32_t hp;
1683 uint32_t tp;
1684 uint32_t num_req;
1685 uint32_t num_refill;
1686 uint64_t timestamp;
1687 };
1688
1689 /**
1690 * struct dp_rx_history - rx ring history
1691 * @index: Index where the last entry is written
1692 * @entry: history entries
1693 */
1694 struct dp_rx_history {
1695 qdf_atomic_t index;
1696 struct dp_buf_info_record entry[DP_RX_HIST_MAX];
1697 };
1698
1699 /**
1700 * struct dp_rx_err_history - rx err ring history
1701 * @index: Index where the last entry is written
1702 * @entry: history entries
1703 */
1704 struct dp_rx_err_history {
1705 qdf_atomic_t index;
1706 struct dp_buf_info_record entry[DP_RX_ERR_HIST_MAX];
1707 };
1708
1709 /**
1710 * struct dp_rx_reinject_history - rx reinject ring history
1711 * @index: Index where the last entry is written
1712 * @entry: history entries
1713 */
1714 struct dp_rx_reinject_history {
1715 qdf_atomic_t index;
1716 struct dp_buf_info_record entry[DP_RX_REINJECT_HIST_MAX];
1717 };
1718
1719 /**
1720 * struct dp_rx_refill_history - rx buf refill history
1721 * @index: Index where the last entry is written
1722 * @entry: history entries
1723 */
1724 struct dp_rx_refill_history {
1725 qdf_atomic_t index;
1726 struct dp_refill_info_record entry[DP_RX_REFILL_HIST_MAX];
1727 };
1728
1729 #endif
1730
1731 /**
1732 * enum dp_cfg_event_type - Datapath config events type
1733 * @DP_CFG_EVENT_VDEV_ATTACH: vdev attach
1734 * @DP_CFG_EVENT_VDEV_DETACH: vdev detach
1735 * @DP_CFG_EVENT_VDEV_UNREF_DEL: vdev memory free after last ref is released
1736 * @DP_CFG_EVENT_PEER_CREATE: peer create
1737 * @DP_CFG_EVENT_PEER_DELETE: peer delete
1738 * @DP_CFG_EVENT_PEER_UNREF_DEL: peer memory free after last ref is released
1739 * @DP_CFG_EVENT_PEER_SETUP: peer setup
1740 * @DP_CFG_EVENT_MLO_ADD_LINK: add link peer to mld peer
1741 * @DP_CFG_EVENT_MLO_DEL_LINK: delete link peer from mld peer
1742 * @DP_CFG_EVENT_MLO_SETUP: MLO peer setup
1743 * @DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE: MLD peer vdev update
1744 * @DP_CFG_EVENT_PEER_MAP: peer map
1745 * @DP_CFG_EVENT_PEER_UNMAP: peer unmap
1746 * @DP_CFG_EVENT_MLO_PEER_MAP: MLD peer map
1747 * @DP_CFG_EVENT_MLO_PEER_UNMAP: MLD peer unmap
1748 */
1749 enum dp_cfg_event_type {
1750 DP_CFG_EVENT_VDEV_ATTACH,
1751 DP_CFG_EVENT_VDEV_DETACH,
1752 DP_CFG_EVENT_VDEV_UNREF_DEL,
1753 DP_CFG_EVENT_PEER_CREATE,
1754 DP_CFG_EVENT_PEER_DELETE,
1755 DP_CFG_EVENT_PEER_UNREF_DEL,
1756 DP_CFG_EVENT_PEER_SETUP,
1757 DP_CFG_EVENT_MLO_ADD_LINK,
1758 DP_CFG_EVENT_MLO_DEL_LINK,
1759 DP_CFG_EVENT_MLO_SETUP,
1760 DP_CFG_EVENT_MLO_SETUP_VDEV_UPDATE,
1761 DP_CFG_EVENT_PEER_MAP,
1762 DP_CFG_EVENT_PEER_UNMAP,
1763 DP_CFG_EVENT_MLO_PEER_MAP,
1764 DP_CFG_EVENT_MLO_PEER_UNMAP,
1765 };
1766
1767 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
1768 /* Size must be a power of 2, for bitwise index rotation */
1769 #define DP_CFG_EVT_HISTORY_SIZE 0x800
1770 #define DP_CFG_EVT_HIST_PER_SLOT_MAX 256
1771 #define DP_CFG_EVT_HIST_MAX_SLOTS 8
1772 #define DP_CFG_EVT_HIST_SLOT_SHIFT 8
1773
1774 /**
1775 * struct dp_vdev_attach_detach_desc - vdev ops descriptor
1776 * @vdev: DP vdev handle
1777 * @mac_addr: vdev mac address
1778 * @vdev_id: vdev id
1779 * @ref_count: vdev ref count
1780 */
1781 struct dp_vdev_attach_detach_desc {
1782 struct dp_vdev *vdev;
1783 union dp_align_mac_addr mac_addr;
1784 uint8_t vdev_id;
1785 int32_t ref_count;
1786 };
1787
1788 /**
1789 * struct dp_peer_cmn_ops_desc - peer events descriptor
1790 * @vdev_id: vdev_id of the vdev on which peer exists
1791 * @is_reuse: indicates if it is a peer reuse case during peer create
1792 * @peer: DP peer handle
1793 * @vdev: DP vdev handle on which peer exists
1794 * @mac_addr: peer mac address
1795 * @vdev_mac_addr: vdev mac address
1796 * @vdev_ref_count: vdev ref count
1797 * @peer_ref_count: peer ref count
1798 */
1799 struct dp_peer_cmn_ops_desc {
1800 uint8_t vdev_id : 5,
1801 is_reuse : 1;
1802 struct dp_peer *peer;
1803 struct dp_vdev *vdev;
1804 union dp_align_mac_addr mac_addr;
1805 union dp_align_mac_addr vdev_mac_addr;
1806 int32_t vdev_ref_count;
1807 int32_t peer_ref_count;
1808 };
1809
1810 /**
1811 * struct dp_mlo_add_del_link_desc - MLO add/del link event descriptor
1812 * @idx: index at which link peer got added in MLD peer's list
1813 * @num_links: num links added in the MLD peer's list
1814 * @action_result: indicates whether the add/del was successful
1815 * @reserved: reserved bit
1816 * @link_peer: link peer handle
1817 * @mld_peer: MLD peer handle
1818 * @link_mac_addr: link peer mac address
1819 * @mld_mac_addr: MLD peer mac address
1820 */
1821 struct dp_mlo_add_del_link_desc {
1822 uint8_t idx : 3,
1823 num_links : 3,
1824 action_result : 1,
1825 reserved : 1;
1826 struct dp_peer *link_peer;
1827 struct dp_peer *mld_peer;
1828 union dp_align_mac_addr link_mac_addr;
1829 union dp_align_mac_addr mld_mac_addr;
1830 };
1831
1832 /**
1833 * struct dp_mlo_setup_vdev_update_desc - MLD peer vdev update event desc
1834 * @mld_peer: MLD peer handle
1835 * @prev_vdev: previous vdev handle
1836 * @new_vdev: new vdev handle
1837 */
1838 struct dp_mlo_setup_vdev_update_desc {
1839 struct dp_peer *mld_peer;
1840 struct dp_vdev *prev_vdev;
1841 struct dp_vdev *new_vdev;
1842 };
1843
1844 /**
1845 * struct dp_rx_peer_map_unmap_desc - peer map/unmap event descriptor
1846 * @peer_id: peer id
1847 * @ml_peer_id: ML peer id, if it is an MLD peer
1848 * @hw_peer_id: hw peer id
1849 * @vdev_id: vdev id of the peer
1850 * @is_ml_peer: is this MLD peer
1851 * @mac_addr: mac address of the peer
1852 * @peer: peer handle
1853 */
1854 struct dp_rx_peer_map_unmap_desc {
1855 uint16_t peer_id;
1856 uint16_t ml_peer_id;
1857 uint16_t hw_peer_id;
1858 uint8_t vdev_id;
1859 uint8_t is_ml_peer;
1860 union dp_align_mac_addr mac_addr;
1861 struct dp_peer *peer;
1862 };
1863
1864 /**
1865 * struct dp_peer_setup_desc - peer setup event descriptor
1866 * @peer: DP peer handle
1867 * @vdev: vdev handle on which peer exists
1868 * @vdev_ref_count: vdev ref count
1869 * @mac_addr: peer mac address
1870 * @mld_mac_addr: MLD mac address
1871 * @is_first_link: is the current link the first link created
1872 * @is_primary_link: is the current link primary link
1873 * @vdev_id: vdev id of the vdev on which the current link peer exists
1874 * @reserved: reserved bit
1875 */
1876 struct dp_peer_setup_desc {
1877 struct dp_peer *peer;
1878 struct dp_vdev *vdev;
1879 int32_t vdev_ref_count;
1880 union dp_align_mac_addr mac_addr;
1881 union dp_align_mac_addr mld_mac_addr;
1882 uint8_t is_first_link : 1,
1883 is_primary_link : 1,
1884 vdev_id : 5,
1885 reserved : 1;
1886 };
1887
1888 /**
1889 * union dp_cfg_event_desc - DP config event descriptor
1890 * @vdev_evt: vdev events desc
1891 * @peer_cmn_evt: common peer events desc
1892 * @peer_setup_evt: peer setup event desc
1893 * @mlo_link_delink_evt: MLO link/delink event desc
1894 * @mlo_setup_vdev_update: MLD peer vdev update event desc
1895 * @peer_map_unmap_evt: peer map/unmap event desc
1896 */
1897 union dp_cfg_event_desc {
1898 struct dp_vdev_attach_detach_desc vdev_evt;
1899 struct dp_peer_cmn_ops_desc peer_cmn_evt;
1900 struct dp_peer_setup_desc peer_setup_evt;
1901 struct dp_mlo_add_del_link_desc mlo_link_delink_evt;
1902 struct dp_mlo_setup_vdev_update_desc mlo_setup_vdev_update;
1903 struct dp_rx_peer_map_unmap_desc peer_map_unmap_evt;
1904 };
1905
1906 /**
1907 * struct dp_cfg_event - DP config event descriptor
1908 * @timestamp: timestamp at which event was recorded
1909 * @type: event type
1910 * @event_desc: event descriptor
1911 */
1912 struct dp_cfg_event {
1913 uint64_t timestamp;
1914 enum dp_cfg_event_type type;
1915 union dp_cfg_event_desc event_desc;
1916 };
1917
1918 /**
1919 * struct dp_cfg_event_history - DP config event history
1920 * @index: current index
1921 * @num_entries_per_slot: number of entries per slot
1922 * @allocated: Is the history allocated or not
1923 * @entry: event history descriptors
1924 */
1925 struct dp_cfg_event_history {
1926 qdf_atomic_t index;
1927 uint16_t num_entries_per_slot;
1928 uint16_t allocated;
1929 struct dp_cfg_event *entry[DP_CFG_EVT_HIST_MAX_SLOTS];
1930 };
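/*
 * Illustrative index decomposition (an assumption about the intended usage):
 * the 0x800 entry deep history is split across DP_CFG_EVT_HIST_MAX_SLOTS slots
 * of DP_CFG_EVT_HIST_PER_SLOT_MAX entries each, so a monotonically increasing
 * write position can be decomposed as:
 *
 *	idx    = qdf_atomic_inc_return(&hist->index) &
 *		 (DP_CFG_EVT_HISTORY_SIZE - 1);
 *	slot   = idx >> DP_CFG_EVT_HIST_SLOT_SHIFT;
 *	offset = idx & (DP_CFG_EVT_HIST_PER_SLOT_MAX - 1);
 *	evt    = &hist->entry[slot][offset];
 *
 * i.e. the slot shift (8) is log2 of the per-slot depth (256) and the total
 * size (0x800) equals slots (8) times the per-slot depth.
 */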
1931 #endif
1932
1933 enum dp_tx_event_type {
1934 DP_TX_DESC_INVAL_EVT = 0,
1935 DP_TX_DESC_MAP,
1936 DP_TX_DESC_COOKIE,
1937 DP_TX_DESC_FLUSH,
1938 DP_TX_DESC_UNMAP,
1939 DP_TX_COMP_UNMAP,
1940 DP_TX_COMP_UNMAP_ERR,
1941 DP_TX_COMP_MSDU_EXT,
1942 };
1943
1944 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
1945 /* Size must be a power of 2, for bitwise index rotation */
1946 #define DP_TX_TCL_HISTORY_SIZE 0x4000
1947 #define DP_TX_TCL_HIST_PER_SLOT_MAX 2048
1948 #define DP_TX_TCL_HIST_MAX_SLOTS 8
1949 #define DP_TX_TCL_HIST_SLOT_SHIFT 11
1950
1951 /* Size must be a power of 2, for bitwise index rotation */
1952 #define DP_TX_COMP_HISTORY_SIZE 0x4000
1953 #define DP_TX_COMP_HIST_PER_SLOT_MAX 2048
1954 #define DP_TX_COMP_HIST_MAX_SLOTS 8
1955 #define DP_TX_COMP_HIST_SLOT_SHIFT 11
1956
1957 struct dp_tx_desc_event {
1958 qdf_nbuf_t skb;
1959 dma_addr_t paddr;
1960 uint32_t sw_cookie;
1961 enum dp_tx_event_type type;
1962 uint64_t ts;
1963 };
1964
1965 struct dp_tx_tcl_history {
1966 qdf_atomic_t index;
1967 uint16_t num_entries_per_slot;
1968 uint16_t allocated;
1969 struct dp_tx_desc_event *entry[DP_TX_TCL_HIST_MAX_SLOTS];
1970 };
1971
1972 struct dp_tx_comp_history {
1973 qdf_atomic_t index;
1974 uint16_t num_entries_per_slot;
1975 uint16_t allocated;
1976 struct dp_tx_desc_event *entry[DP_TX_COMP_HIST_MAX_SLOTS];
1977 };
1978 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */
1979
1980 /* structure to record recent operation related variables */
1981 struct dp_last_op_info {
1982 /* last link desc buf info through WBM release ring */
1983 struct hal_buf_info wbm_rel_link_desc;
1984 /* last link desc buf info through REO reinject ring */
1985 struct hal_buf_info reo_reinject_link_desc;
1986 };
1987
1988 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1989
1990 /**
1991 * struct dp_swlm_tcl_data - params for tcl register write coalescing
1992 * decision making
1993 * @nbuf: TX packet
1994 * @tid: tid for transmitting the current packet
1995 * @num_ll_connections: Number of low latency connections on this vdev
1996 * @ring_id: TCL ring id
1997 * @pkt_len: Packet length
1998 *
1999 * This structure contains the information required by the software
2000 * latency manager to decide on whether to coalesce the current TCL
2001 * register write or not.
2002 */
2003 struct dp_swlm_tcl_data {
2004 qdf_nbuf_t nbuf;
2005 uint8_t tid;
2006 uint8_t num_ll_connections;
2007 uint8_t ring_id;
2008 uint32_t pkt_len;
2009 };
2010
2011 /**
2012 * union swlm_data - SWLM query data
2013 * @tcl_data: data for TCL query in SWLM
2014 */
2015 union swlm_data {
2016 struct dp_swlm_tcl_data *tcl_data;
2017 };
2018
2019 /**
2020 * struct dp_swlm_ops - SWLM ops
2021 * @tcl_wr_coalesce_check: handler to check if the current TCL register
2022 * write can be coalesced or not
2023 */
2024 struct dp_swlm_ops {
2025 int (*tcl_wr_coalesce_check)(struct dp_soc *soc,
2026 struct dp_swlm_tcl_data *tcl_data);
2027 };
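/*
 * Illustrative usage sketch (an assumption, not the actual transmit path): a
 * TX caller would describe the frame it is about to enqueue in a
 * struct dp_swlm_tcl_data and consult the registered handler, with a nonzero
 * return presumably meaning the TCL HP write may be deferred:
 *
 *	struct dp_swlm_tcl_data tcl_data = {
 *		.nbuf = nbuf,
 *		.tid = tid,
 *		.num_ll_connections = num_ll_conn,  // low latency conns on vdev
 *		.ring_id = ring_id,
 *		.pkt_len = qdf_nbuf_len(nbuf),
 *	};
 *	int coalesce = soc->swlm.ops->tcl_wr_coalesce_check(soc, &tcl_data);
 *
 * When coalescing is chosen, the HP update is left to the per-ring flush
 * timer or to the byte/time thresholds described further below.
 */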
2028
2029 /**
2030 * struct dp_swlm_stats - Stats for Software Latency manager.
2031 * @tcl: TCL stats
2032 * @tcl.timer_flush_success: Num TCL HP writes success from timer context
2033 * @tcl.timer_flush_fail: Num TCL HP writes failure from timer context
2034 * @tcl.tid_fail: Num TCL register write coalescing skips, since the pkt
2035 * was being transmitted on a TID above coalescing threshold
2036 * @tcl.sp_frames: Num TCL register write coalescing skips, since the pkt
2037 * being transmitted was a special frame
2038 * @tcl.ll_connection: Num TCL register write coalescing skips, since the
2039 * vdev has low latency connections
2040 * @tcl.bytes_thresh_reached: Num TCL HP writes flush after the coalescing
2041 * bytes threshold was reached
2042 * @tcl.time_thresh_reached: Num TCL HP writes flush after the coalescing
2043 * session time expired
2044 * @tcl.tput_criteria_fail: Num TCL HP writes coalescing fails, since the
2045 * throughput did not meet session threshold
2046 * @tcl.coalesce_success: Num of TCL HP writes coalesced successfully.
2047 * @tcl.coalesce_fail: Num of TCL HP write coalesce failures
2048 */
2049 struct dp_swlm_stats {
2050 struct {
2051 uint32_t timer_flush_success;
2052 uint32_t timer_flush_fail;
2053 uint32_t tid_fail;
2054 uint32_t sp_frames;
2055 uint32_t ll_connection;
2056 uint32_t bytes_thresh_reached;
2057 uint32_t time_thresh_reached;
2058 uint32_t tput_criteria_fail;
2059 uint32_t coalesce_success;
2060 uint32_t coalesce_fail;
2061 } tcl[MAX_TCL_DATA_RINGS];
2062 };
2063
2064 /**
2065 * struct dp_swlm_tcl_params: Per TCL ring parameters for different modules
2066 * in the Software latency manager.
2067 * @soc: DP soc reference
2068 * @ring_id: TCL ring id
2069 * @flush_timer: Timer for flushing the coalesced TCL HP writes
2070 * @sampling_session_tx_bytes: Num bytes transmitted in the sampling time
2071 * @bytes_flush_thresh: Bytes threshold to flush the TCL HP register write
2072 * @coalesce_end_time: End timestamp for current coalescing session
2073 * @bytes_coalesced: Num bytes coalesced in the current session
2074 * @prev_tx_packets: Previous TX packets accounted
2075 * @prev_tx_bytes: Previous TX bytes accounted
2076 * @prev_rx_bytes: Previous RX bytes accounted
2077 * @expire_time: expiry time for sample
2078 * @tput_pass_cnt: threshold throughput pass counter
2079 */
2080 struct dp_swlm_tcl_params {
2081 struct dp_soc *soc;
2082 uint32_t ring_id;
2083 qdf_timer_t flush_timer;
2084 uint32_t sampling_session_tx_bytes;
2085 uint32_t bytes_flush_thresh;
2086 uint64_t coalesce_end_time;
2087 uint32_t bytes_coalesced;
2088 uint32_t prev_tx_packets;
2089 uint32_t prev_tx_bytes;
2090 uint32_t prev_rx_bytes;
2091 uint64_t expire_time;
2092 uint32_t tput_pass_cnt;
2093 };
2094
2095 /**
2096 * struct dp_swlm_params: Parameters for different modules in the
2097 * Software latency manager.
2098 * @rx_traffic_thresh: Threshold for RX traffic, to begin TCL register
2099 * write coalescing
2100 * @tx_traffic_thresh: Threshold for TX traffic, to begin TCL register
2101 * write coalescing
2102 * @sampling_time: Sampling time to test the throughput threshold
2103 * @time_flush_thresh: Time threshold to flush the TCL HP register write
2104 * @tx_thresh_multiplier: Multiplier to deduce the bytes threshold after
2105 * which the TCL HP register is written, thereby
2106 * ending the coalescing.
2107 * @tx_pkt_thresh: Threshold for TX packet count, to begin TCL register
2108 * write coalescing
2109 * @tcl: TCL ring specific params
2110 */
2111
2112 struct dp_swlm_params {
2113 uint32_t rx_traffic_thresh;
2114 uint32_t tx_traffic_thresh;
2115 uint32_t sampling_time;
2116 uint32_t time_flush_thresh;
2117 uint32_t tx_thresh_multiplier;
2118 uint32_t tx_pkt_thresh;
2119 struct dp_swlm_tcl_params tcl[MAX_TCL_DATA_RINGS];
2120 };
2121
2122 /**
2123 * struct dp_swlm - Software latency manager context
2124 * @ops: SWLM ops pointers
2125 * @is_enabled: SWLM enabled/disabled
2126 * @is_init: SWLM module initialized
2127 * @stats: SWLM stats
2128 * @params: SWLM SRNG params
2130 */
2131 struct dp_swlm {
2132 struct dp_swlm_ops *ops;
2133 uint8_t is_enabled:1,
2134 is_init:1;
2135 struct dp_swlm_stats stats;
2136 struct dp_swlm_params params;
2137 };
2138 #endif
2139
2140 #ifdef IPA_OFFLOAD
2141 /* IPA uC datapath offload WLAN Tx resources */
2142 struct ipa_dp_tx_rsc {
2143 /* Resource info to be passed to IPA */
2144 qdf_dma_addr_t ipa_tcl_ring_base_paddr;
2145 void *ipa_tcl_ring_base_vaddr;
2146 uint32_t ipa_tcl_ring_size;
2147 qdf_dma_addr_t ipa_tcl_hp_paddr;
2148 uint32_t alloc_tx_buf_cnt;
2149
2150 qdf_dma_addr_t ipa_wbm_ring_base_paddr;
2151 void *ipa_wbm_ring_base_vaddr;
2152 uint32_t ipa_wbm_ring_size;
2153 qdf_dma_addr_t ipa_wbm_tp_paddr;
2154 /* WBM2SW HP shadow paddr */
2155 qdf_dma_addr_t ipa_wbm_hp_shadow_paddr;
2156
2157 /* TX buffers populated into the WBM ring */
2158 void **tx_buf_pool_vaddr_unaligned;
2159 qdf_dma_addr_t *tx_buf_pool_paddr_unaligned;
2160 };
2161
2162 /* IPA uC datapath offload WLAN Rx resources */
2163 struct ipa_dp_rx_rsc {
2164 /* Resource info to be passed to IPA */
2165 qdf_dma_addr_t ipa_reo_ring_base_paddr;
2166 void *ipa_reo_ring_base_vaddr;
2167 uint32_t ipa_reo_ring_size;
2168 qdf_dma_addr_t ipa_reo_tp_paddr;
2169
2170 /* Resource info to be passed to firmware and IPA */
2171 qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
2172 void *ipa_rx_refill_buf_ring_base_vaddr;
2173 uint32_t ipa_rx_refill_buf_ring_size;
2174 qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
2175 };
2176 #endif
2177
2178 struct dp_tx_msdu_info_s;
2179 /**
2180 * enum dp_context_type- DP Context Type
2181 * @DP_CONTEXT_TYPE_SOC: Context type DP SOC
2182 * @DP_CONTEXT_TYPE_PDEV: Context type DP PDEV
2183 * @DP_CONTEXT_TYPE_VDEV: Context type DP VDEV
2184 * @DP_CONTEXT_TYPE_PEER: Context type DP PEER
2185 * @DP_CONTEXT_TYPE_MON_SOC: Context type DP MON SOC
2186 * @DP_CONTEXT_TYPE_MON_PDEV: Context type DP MON PDEV
2187 *
2188 * Helper enums to be used to retrieve the size of the corresponding
2189 * data structure by passing the type.
2190 */
2191 enum dp_context_type {
2192 DP_CONTEXT_TYPE_SOC,
2193 DP_CONTEXT_TYPE_PDEV,
2194 DP_CONTEXT_TYPE_VDEV,
2195 DP_CONTEXT_TYPE_PEER,
2196 DP_CONTEXT_TYPE_MON_SOC,
2197 DP_CONTEXT_TYPE_MON_PDEV
2198 };
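/*
 * Illustrative sketch (an assumption): the enum values are meant to be passed
 * to the size query ops declared further below in struct dp_arch_ops
 * (txrx_get_context_size / txrx_get_mon_context_size), e.g. when allocating a
 * target sized SOC context:
 *
 *	qdf_size_t sz = arch_ops->txrx_get_context_size(DP_CONTEXT_TYPE_SOC);
 *	struct dp_soc *soc = qdf_mem_malloc(sz);
 */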
2199
2200 /**
2201 * struct dp_arch_ops - DP target specific arch ops
2202 * @txrx_soc_attach:
2203 * @txrx_soc_detach:
2204 * @txrx_soc_init:
2205 * @txrx_soc_deinit:
2206 * @txrx_soc_srng_alloc:
2207 * @txrx_soc_srng_init:
2208 * @txrx_soc_srng_deinit:
2209 * @txrx_soc_srng_free:
2210 * @txrx_pdev_attach:
2211 * @txrx_pdev_detach:
2212 * @txrx_vdev_attach:
2213 * @txrx_vdev_detach:
2214 * @txrx_peer_map_attach:
2215 * @txrx_peer_map_detach:
2216 * @dp_rxdma_ring_sel_cfg:
2217 * @soc_cfg_attach:
2218 * @txrx_peer_setup:
2219 * @peer_get_reo_hash:
2220 * @reo_remap_config:
2221 * @tx_hw_enqueue: enqueue TX data to HW
2222 * @tx_comp_get_params_from_hal_desc: get software tx descriptor and release
2223 * source from HAL desc for wbm release ring
2224 * @dp_tx_mlo_mcast_send: Tx send handler for MLO multicast enhance
2225 * @dp_tx_process_htt_completion:
2226 * @dp_rx_process:
2227 * @dp_tx_send_fast:
2228 * @dp_tx_desc_pool_init:
2229 * @dp_tx_desc_pool_deinit:
2230 * @dp_rx_desc_pool_init:
2231 * @dp_rx_desc_pool_deinit:
2232 * @dp_wbm_get_rx_desc_from_hal_desc:
2233 * @dp_rx_intrabss_mcast_handler:
2234 * @dp_rx_word_mask_subscribe:
2235 * @dp_rx_desc_cookie_2_va:
2236 * @dp_service_near_full_srngs: Handler for servicing the near full IRQ
2237 * @tx_implicit_rbm_set:
2238 * @dp_rx_peer_metadata_peer_id_get:
2239 * @dp_rx_chain_msdus:
2240 * @txrx_set_vdev_param: target specific ops while setting vdev params
2241 * @txrx_get_vdev_mcast_param: target specific ops for getting vdev
2242 * params related to multicast
2243 * @txrx_get_context_size:
2244 * @txrx_get_mon_context_size:
2245 * @dp_srng_test_and_update_nf_params: Check if the srng is in near full state
2246 * and set the near-full params.
2247 * @dp_tx_mcast_handler:
2248 * @dp_rx_mcast_handler:
2249 * @dp_tx_is_mcast_primary:
2250 * @dp_soc_get_by_idle_bm_id:
2251 * @mlo_peer_find_hash_detach:
2252 * @mlo_peer_find_hash_attach:
2253 * @mlo_peer_find_hash_add:
2254 * @mlo_peer_find_hash_remove:
2255 * @mlo_peer_find_hash_find:
2256 * @mlo_get_chip_id: get the MLO chip id
2257 * @mlo_link_peer_find_hash_find_by_chip_id: return the link peer on the chip
2258 * @get_hw_link_id:
2259 * @dp_rx_peer_set_link_id: set link id in nbuf cb
2260 * @get_reo_qdesc_addr:
2261 * @get_rx_hash_key:
2262 * @dp_set_rx_fst:
2263 * @dp_get_rx_fst:
2264 * @dp_rx_fst_deref:
2265 * @dp_rx_fst_ref:
2266 * @txrx_print_peer_stats:
2267 * @dp_peer_rx_reorder_queue_setup: Dp peer reorder queue setup
2268 * @dp_bank_reconfig:
2269 * @dp_get_soc_by_chip_id: Get soc by chip id
2270 * @dp_soc_get_num_soc:
2271 * @dp_reconfig_tx_vdev_mcast_ctrl:
2272 * @dp_cc_reg_cfg_init:
2273 * @dp_tx_compute_hw_delay:
2274 * @print_mlo_ast_stats:
2275 * @dp_partner_chips_map:
2276 * @dp_partner_chips_unmap:
2277 * @ipa_get_bank_id: Get TCL bank id used by IPA
2278 * @ipa_get_wdi_ver: Get WDI version
2279 * @dp_txrx_ppeds_rings_status:
2280 * @dp_tx_ppeds_inuse_desc:
2281 * @dp_ppeds_clear_stats: Clear ppeds related stats
2282 * @dp_tx_ppeds_cfg_astidx_cache_mapping:
2283 * @dp_txrx_ppeds_rings_stats: Print the ring utilization stats
2284 * @dp_txrx_ppeds_clear_rings_stats: Clear the ring utilization stats
2285 * @txrx_soc_ppeds_start:
2286 * @txrx_soc_ppeds_stop:
2287 * @dp_register_ppeds_interrupts:
2288 * @dp_free_ppeds_interrupts:
2289 * @dp_rx_wbm_err_reap_desc: Reap WBM Error Ring Descriptor
2290 * @dp_rx_null_q_desc_handle: Handle Null Queue Exception Error
2291 * @dp_tx_desc_pool_alloc: Allocate arch specific TX descriptor pool
2292 * @dp_tx_desc_pool_free: Free arch specific TX descriptor pool
2293 * @txrx_srng_init: Init txrx srng
2294 * @dp_get_vdev_stats_for_unmap_peer: Get vdev stats pointer for unmap peer
2295 * @dp_get_interface_stats: Get interface stats
2296 * @ppeds_handle_attached:
2297 * @txrx_soc_ppeds_interrupt_stop:
2298 * @txrx_soc_ppeds_interrupt_start:
2299 * @txrx_soc_ppeds_service_status_update:
2300 * @txrx_soc_ppeds_enabled_check:
2301 * @txrx_soc_ppeds_txdesc_pool_reset:
2302 * @dp_update_ring_hptp: Update rings hptp during suspend/resume
2303 * @dp_get_fst_cmem_base: Get CMEM base address for FISA
2304 * @dp_flush_tx_ring: Flush TCL ring HP
2305 * @dp_mlo_print_ptnr_info: print partner vdev info
2306 * @dp_soc_interrupt_attach: DP interrupt attach
2307 * @dp_soc_attach_poll: DP poll attach
2308 * @dp_soc_interrupt_detach: DP interrupt detach
2309 * @dp_service_srngs: Service DP interrupts
2310 */
2311 struct dp_arch_ops {
2312 /* INIT/DEINIT Arch Ops */
2313 QDF_STATUS (*txrx_soc_attach)(struct dp_soc *soc,
2314 struct cdp_soc_attach_params *params);
2315 QDF_STATUS (*txrx_soc_detach)(struct dp_soc *soc);
2316 void* (*txrx_soc_init)(struct dp_soc *soc, HTC_HANDLE htc_handle,
2317 struct hif_opaque_softc *hif_handle);
2318 QDF_STATUS (*txrx_soc_deinit)(struct dp_soc *soc);
2319 QDF_STATUS (*txrx_soc_srng_alloc)(struct dp_soc *soc);
2320 QDF_STATUS (*txrx_soc_srng_init)(struct dp_soc *soc);
2321 void (*txrx_soc_srng_deinit)(struct dp_soc *soc);
2322 void (*txrx_soc_srng_free)(struct dp_soc *soc);
2323 QDF_STATUS (*txrx_pdev_attach)(struct dp_pdev *pdev,
2324 struct cdp_pdev_attach_params *params);
2325 QDF_STATUS (*txrx_pdev_detach)(struct dp_pdev *pdev);
2326 QDF_STATUS (*txrx_vdev_attach)(struct dp_soc *soc,
2327 struct dp_vdev *vdev);
2328 QDF_STATUS (*txrx_vdev_detach)(struct dp_soc *soc,
2329 struct dp_vdev *vdev);
2330 QDF_STATUS (*txrx_peer_map_attach)(struct dp_soc *soc);
2331 void (*txrx_peer_map_detach)(struct dp_soc *soc);
2332 QDF_STATUS (*dp_rxdma_ring_sel_cfg)(struct dp_soc *soc);
2333 void (*soc_cfg_attach)(struct dp_soc *soc);
2334 QDF_STATUS (*txrx_peer_setup)(struct cdp_soc_t *soc_hdl,
2335 uint8_t vdev_id, uint8_t *peer_mac,
2336 struct cdp_peer_setup_info *setup_info);
2337 void (*peer_get_reo_hash)(struct dp_vdev *vdev,
2338 struct cdp_peer_setup_info *setup_info,
2339 enum cdp_host_reo_dest_ring *reo_dest,
2340 bool *hash_based,
2341 uint8_t *lmac_peer_id_msb);
2342 bool (*reo_remap_config)(struct dp_soc *soc, uint32_t *remap0,
2343 uint32_t *remap1, uint32_t *remap2);
2344
2345 /* TX RX Arch Ops */
2346 QDF_STATUS (*tx_hw_enqueue)(struct dp_soc *soc, struct dp_vdev *vdev,
2347 struct dp_tx_desc_s *tx_desc,
2348 uint16_t fw_metadata,
2349 struct cdp_tx_exception_metadata *metadata,
2350 struct dp_tx_msdu_info_s *msdu_info);
2351
2352 QDF_STATUS (*tx_comp_get_params_from_hal_desc)(
2353 struct dp_soc *soc, void *tx_comp_hal_desc,
2354 struct dp_tx_desc_s **desc);
2355
2356 qdf_nbuf_t (*dp_tx_mlo_mcast_send)(struct dp_soc *soc,
2357 struct dp_vdev *vdev,
2358 qdf_nbuf_t nbuf,
2359 struct cdp_tx_exception_metadata
2360 *tx_exc_metadata);
2361
2362 void (*dp_tx_process_htt_completion)(struct dp_soc *soc,
2363 struct dp_tx_desc_s *tx_desc,
2364 uint8_t *status,
2365 uint8_t ring_id);
2366
2367 uint32_t (*dp_rx_process)(struct dp_intr *int_ctx,
2368 hal_ring_handle_t hal_ring_hdl,
2369 uint8_t reo_ring_num, uint32_t quota);
2370
2371 qdf_nbuf_t (*dp_tx_send_fast)(struct cdp_soc_t *soc_hdl,
2372 uint8_t vdev_id,
2373 qdf_nbuf_t nbuf);
2374
2375 QDF_STATUS (*dp_tx_desc_pool_init)(struct dp_soc *soc,
2376 uint32_t num_elem,
2377 uint8_t pool_id,
2378 bool spcl_tx_desc);
2379 void (*dp_tx_desc_pool_deinit)(
2380 struct dp_soc *soc,
2381 struct dp_tx_desc_pool_s *tx_desc_pool,
2382 uint8_t pool_id,
2383 bool spcl_tx_desc);
2384
2385 QDF_STATUS (*dp_rx_desc_pool_init)(struct dp_soc *soc,
2386 struct rx_desc_pool *rx_desc_pool,
2387 uint32_t pool_id);
2388 void (*dp_rx_desc_pool_deinit)(struct dp_soc *soc,
2389 struct rx_desc_pool *rx_desc_pool,
2390 uint32_t pool_id);
2391
2392 QDF_STATUS (*dp_wbm_get_rx_desc_from_hal_desc)(
2393 struct dp_soc *soc,
2394 void *ring_desc,
2395 struct dp_rx_desc **r_rx_desc);
2396
2397 bool
2398 (*dp_rx_intrabss_mcast_handler)(struct dp_soc *soc,
2399 struct dp_txrx_peer *ta_txrx_peer,
2400 qdf_nbuf_t nbuf_copy,
2401 struct cdp_tid_rx_stats *tid_stats,
2402 uint8_t link_id);
2403
2404 void (*dp_rx_word_mask_subscribe)(
2405 struct dp_soc *soc,
2406 uint32_t *msg_word,
2407 void *rx_filter);
2408
2409 struct dp_rx_desc *(*dp_rx_desc_cookie_2_va)(struct dp_soc *soc,
2410 uint32_t cookie);
2411 uint32_t (*dp_service_near_full_srngs)(struct dp_soc *soc,
2412 struct dp_intr *int_ctx,
2413 uint32_t dp_budget);
2414 void (*tx_implicit_rbm_set)(struct dp_soc *soc, uint8_t tx_ring_id,
2415 uint8_t bm_id);
2416 uint16_t (*dp_rx_peer_metadata_peer_id_get)(struct dp_soc *soc,
2417 uint32_t peer_metadata);
2418 bool (*dp_rx_chain_msdus)(struct dp_soc *soc, qdf_nbuf_t nbuf,
2419 uint8_t *rx_tlv_hdr, uint8_t mac_id);
2420 /* Control Arch Ops */
2421 QDF_STATUS (*txrx_set_vdev_param)(struct dp_soc *soc,
2422 struct dp_vdev *vdev,
2423 enum cdp_vdev_param_type param,
2424 cdp_config_param_type val);
2425
2426 QDF_STATUS (*txrx_get_vdev_mcast_param)(struct dp_soc *soc,
2427 struct dp_vdev *vdev,
2428 cdp_config_param_type *val);
2429
2430 /* Misc Arch Ops */
2431 qdf_size_t (*txrx_get_context_size)(enum dp_context_type);
2432 #ifdef WIFI_MONITOR_SUPPORT
2433 qdf_size_t (*txrx_get_mon_context_size)(enum dp_context_type);
2434 #endif
2435 int (*dp_srng_test_and_update_nf_params)(struct dp_soc *soc,
2436 struct dp_srng *dp_srng,
2437 int *max_reap_limit);
2438
2439 /* MLO ops */
2440 #ifdef WLAN_FEATURE_11BE_MLO
2441 #ifdef WLAN_MCAST_MLO
2442 void (*dp_tx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
2443 qdf_nbuf_t nbuf);
2444 bool (*dp_rx_mcast_handler)(struct dp_soc *soc, struct dp_vdev *vdev,
2445 struct dp_txrx_peer *peer, qdf_nbuf_t nbuf,
2446 uint8_t link_id);
2447 bool (*dp_tx_is_mcast_primary)(struct dp_soc *soc,
2448 struct dp_vdev *vdev);
2449 #endif
2450 struct dp_soc * (*dp_soc_get_by_idle_bm_id)(struct dp_soc *soc,
2451 uint8_t bm_id);
2452
2453 void (*mlo_peer_find_hash_detach)(struct dp_soc *soc);
2454 QDF_STATUS (*mlo_peer_find_hash_attach)(struct dp_soc *soc);
2455 void (*mlo_peer_find_hash_add)(struct dp_soc *soc,
2456 struct dp_peer *peer);
2457 void (*mlo_peer_find_hash_remove)(struct dp_soc *soc,
2458 struct dp_peer *peer);
2459
2460 struct dp_peer *(*mlo_peer_find_hash_find)(struct dp_soc *soc,
2461 uint8_t *peer_mac_addr,
2462 int mac_addr_is_aligned,
2463 enum dp_mod_id mod_id,
2464 uint8_t vdev_id);
2465 #ifdef WLAN_MLO_MULTI_CHIP
2466 uint8_t (*mlo_get_chip_id)(struct dp_soc *soc);
2467 struct dp_peer *(*mlo_link_peer_find_hash_find_by_chip_id)
2468 (struct dp_soc *soc,
2469 uint8_t *peer_mac_addr,
2470 int mac_addr_is_aligned,
2471 uint8_t vdev_id,
2472 uint8_t chip_id,
2473 enum dp_mod_id mod_id);
2474 #endif
2475 #endif
2476 uint8_t (*get_hw_link_id)(struct dp_pdev *pdev);
2477 void (*dp_rx_peer_set_link_id)(qdf_nbuf_t nbuf, uint32_t peer_mdata);
2478 uint64_t (*get_reo_qdesc_addr)(hal_soc_handle_t hal_soc_hdl,
2479 uint8_t *dst_ring_desc,
2480 uint8_t *buf,
2481 struct dp_txrx_peer *peer,
2482 unsigned int tid);
2483 void (*get_rx_hash_key)(struct dp_soc *soc,
2484 struct cdp_lro_hash_config *lro_hash);
2485 void (*dp_set_rx_fst)(struct dp_rx_fst *fst);
2486 struct dp_rx_fst *(*dp_get_rx_fst)(void);
2487 uint32_t (*dp_rx_fst_deref)(void);
2488 void (*dp_rx_fst_ref)(void);
2489 void (*txrx_print_peer_stats)(struct cdp_peer_stats *peer_stats,
2490 enum peer_stats_type stats_type);
2491 QDF_STATUS (*dp_peer_rx_reorder_queue_setup)(struct dp_soc *soc,
2492 struct dp_peer *peer,
2493 uint32_t tid_bitmap,
2494 uint32_t ba_window_size);
2495 void (*dp_bank_reconfig)(struct dp_soc *soc, struct dp_vdev *vdev);
2496
2497 struct dp_soc * (*dp_get_soc_by_chip_id)(struct dp_soc *soc,
2498 uint8_t chip_id);
2499
2500 uint8_t (*dp_soc_get_num_soc)(struct dp_soc *soc);
2501 void (*dp_reconfig_tx_vdev_mcast_ctrl)(struct dp_soc *soc,
2502 struct dp_vdev *vdev);
2503
2504 void (*dp_cc_reg_cfg_init)(struct dp_soc *soc, bool is_4k_align);
2505
2506 QDF_STATUS
2507 (*dp_tx_compute_hw_delay)(struct dp_soc *soc,
2508 struct dp_vdev *vdev,
2509 struct hal_tx_completion_status *ts,
2510 uint32_t *delay_us);
2511 void (*print_mlo_ast_stats)(struct dp_soc *soc);
2512 void (*dp_partner_chips_map)(struct dp_soc *soc,
2513 struct dp_peer *peer,
2514 uint16_t peer_id);
2515 void (*dp_partner_chips_unmap)(struct dp_soc *soc,
2516 uint16_t peer_id);
2517
2518 #ifdef IPA_OFFLOAD
2519 int8_t (*ipa_get_bank_id)(struct dp_soc *soc);
2520 void (*ipa_get_wdi_ver)(uint8_t *wdi_ver);
2521 #endif
2522 #ifdef WLAN_SUPPORT_PPEDS
2523 void (*dp_txrx_ppeds_rings_status)(struct dp_soc *soc);
2524 void (*dp_tx_ppeds_inuse_desc)(struct dp_soc *soc);
2525 void (*dp_ppeds_clear_stats)(struct dp_soc *soc);
2526 void (*dp_tx_ppeds_cfg_astidx_cache_mapping)(struct dp_soc *soc,
2527 struct dp_vdev *vdev,
2528 bool peer_map);
2529 void (*dp_txrx_ppeds_rings_stats)(struct dp_soc *soc);
2530 void (*dp_txrx_ppeds_clear_rings_stats)(struct dp_soc *soc);
2531 #endif
2532 bool (*ppeds_handle_attached)(struct dp_soc *soc);
2533 QDF_STATUS (*txrx_soc_ppeds_start)(struct dp_soc *soc);
2534 void (*txrx_soc_ppeds_stop)(struct dp_soc *soc);
2535 int (*dp_register_ppeds_interrupts)(struct dp_soc *soc,
2536 struct dp_srng *srng, int vector,
2537 int ring_type, int ring_num);
2538 void (*dp_free_ppeds_interrupts)(struct dp_soc *soc,
2539 struct dp_srng *srng, int ring_type,
2540 int ring_num);
2541 qdf_nbuf_t (*dp_rx_wbm_err_reap_desc)(struct dp_intr *int_ctx,
2542 struct dp_soc *soc,
2543 hal_ring_handle_t hal_ring_hdl,
2544 uint32_t quota,
2545 uint32_t *rx_bufs_used);
2546 QDF_STATUS (*dp_rx_null_q_desc_handle)(struct dp_soc *soc,
2547 qdf_nbuf_t nbuf,
2548 uint8_t *rx_tlv_hdr,
2549 uint8_t pool_id,
2550 struct dp_txrx_peer *txrx_peer,
2551 bool is_reo_exception,
2552 uint8_t link_id);
2553
2554 QDF_STATUS (*dp_tx_desc_pool_alloc)(struct dp_soc *soc,
2555 uint32_t num_elem,
2556 uint8_t pool_id);
2557 void (*dp_tx_desc_pool_free)(struct dp_soc *soc, uint8_t pool_id);
2558
2559 QDF_STATUS (*txrx_srng_init)(struct dp_soc *soc, struct dp_srng *srng,
2560 int ring_type, int ring_num, int mac_id);
2561
2562 void (*dp_get_vdev_stats_for_unmap_peer)(
2563 struct dp_vdev *vdev,
2564 struct dp_peer *peer);
2565 QDF_STATUS (*dp_get_interface_stats)(struct cdp_soc_t *soc_hdl,
2566 uint8_t vdev_id,
2567 void *buf,
2568 bool is_aggregate);
2569 #ifdef WLAN_SUPPORT_PPEDS
2570 void (*txrx_soc_ppeds_interrupt_stop)(struct dp_soc *soc);
2571 void (*txrx_soc_ppeds_interrupt_start)(struct dp_soc *soc);
2572 void (*txrx_soc_ppeds_service_status_update)(struct dp_soc *soc,
2573 bool enable);
2574 bool (*txrx_soc_ppeds_enabled_check)(struct dp_soc *soc);
2575 void (*txrx_soc_ppeds_txdesc_pool_reset)(struct dp_soc *soc,
2576 qdf_nbuf_t *nbuf_list);
2577 #endif
2578 void (*dp_update_ring_hptp)(struct dp_soc *soc, bool force_flush_tx);
2579 uint64_t (*dp_get_fst_cmem_base)(struct dp_soc *soc, uint64_t size);
2580 int (*dp_flush_tx_ring)(struct dp_pdev *pdev, int ring_id);
2581 void (*dp_mlo_print_ptnr_info)(struct dp_vdev *vdev);
2582 QDF_STATUS (*dp_soc_interrupt_attach)(struct cdp_soc_t *txrx_soc);
2583 QDF_STATUS (*dp_soc_attach_poll)(struct cdp_soc_t *txrx_soc);
2584 void (*dp_soc_interrupt_detach)(struct cdp_soc_t *txrx_soc);
2585 uint32_t (*dp_service_srngs)(void *dp_ctx, uint32_t dp_budget, int cpu);
2586 };
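/*
 * Illustrative dispatch sketch (an assumption; the initializer and handler
 * names are hypothetical): each target family fills this table once at attach
 * time and the common datapath always calls through the per-soc copy:
 *
 *	// hypothetical target specific initializer
 *	void dp_initialize_arch_ops_xyz(struct dp_arch_ops *ops)
 *	{
 *		ops->tx_hw_enqueue = dp_tx_hw_enqueue_xyz;
 *		ops->dp_rx_process = dp_rx_process_xyz;
 *	}
 *
 *	// common code path
 *	work_done = soc->arch_ops.dp_rx_process(int_ctx, hal_ring_hdl,
 *						reo_ring_num, quota);
 */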
2587
2588 /**
2589 * struct dp_soc_features: Data structure holding the SOC level feature flags.
2590 * @pn_in_reo_dest: PN provided by hardware in the REO destination ring.
2591 * @dmac_cmn_src_rxbuf_ring_enabled: Flag to indicate DMAC mode common Rx
2592 * buffer source rings
2593 * @rssi_dbm_conv_support: RSSI to dBm conversion support param.
2594 * @umac_hw_reset_support: UMAC HW reset support
2595 * @wds_ext_ast_override_enable:
2596 * @multi_rx_reorder_q_setup_support: support for setting up multiple rx reorder queues at a time
2597 */
2598 struct dp_soc_features {
2599 uint8_t pn_in_reo_dest:1,
2600 dmac_cmn_src_rxbuf_ring_enabled:1;
2601 bool rssi_dbm_conv_support;
2602 bool umac_hw_reset_support;
2603 bool wds_ext_ast_override_enable;
2604 bool multi_rx_reorder_q_setup_support;
2605 };
2606
2607 enum sysfs_printing_mode {
2608 PRINTING_MODE_DISABLED = 0,
2609 PRINTING_MODE_ENABLED
2610 };
2611
2612 /**
2613 * typedef notify_pre_reset_fw_callback() - pre-reset callback
2614 * @soc: DP SoC
2615 */
2616 typedef void (*notify_pre_reset_fw_callback)(struct dp_soc *soc);
2617
2618 #ifdef WLAN_SYSFS_DP_STATS
2619 /**
2620 * struct sysfs_stats_config: Data structure holding stats sysfs config.
2621 * @rw_stats_lock: Lock to read and write to stat_type and pdev_id.
2622 * @sysfs_read_lock: Lock held while another stat req is being executed.
2623 * @sysfs_write_user_buffer: Lock to change buff len, max buf len
2624 * and *buf.
2625 * @sysfs_txrx_fw_request_done: Event to wait for firmware response.
2626 * @stat_type_requested: stat type requested.
2627 * @mac_id: mac id for which the stat type is requested.
2628 * @printing_mode: Indicates whether a print should go through.
2629 * @process_id: Process allowed to write to buffer.
2630 * @curr_buffer_length: Current length of the buffer written.
2631 * @max_buffer_length: Max buffer length.
2632 * @buf: Sysfs buffer.
2633 */
2634 struct sysfs_stats_config {
2635 /* lock held to read stats */
2636 qdf_spinlock_t rw_stats_lock;
2637 qdf_mutex_t sysfs_read_lock;
2638 qdf_spinlock_t sysfs_write_user_buffer;
2639 qdf_event_t sysfs_txrx_fw_request_done;
2640 uint32_t stat_type_requested;
2641 uint32_t mac_id;
2642 enum sysfs_printing_mode printing_mode;
2643 int process_id;
2644 uint16_t curr_buffer_length;
2645 uint16_t max_buffer_length;
2646 char *buf;
2647 };
2648 #endif
2649
2650 struct test_mem_free {
2651 unsigned long ts_qdesc_mem_hdl;
2652 qdf_dma_addr_t hw_qdesc_paddr;
2653 void *hw_qdesc_vaddr_align;
2654 void *hw_qdesc_vaddr_unalign;
2655 uint32_t peer_id;
2656 uint32_t tid;
2657 uint8_t chip_id;
2658 unsigned long ts_hw_flush_back;
2659 };
2660
2661 struct test_qaddr_del {
2662 unsigned long ts_qaddr_del;
2663 uint32_t peer_id;
2664 uint32_t paddr;
2665 uint32_t tid;
2666 uint8_t chip_id;
2667 };
2668
2669 #ifdef DP_RX_MSDU_DONE_FAIL_HISTORY
2670
2671 #define DP_MSDU_DONE_FAIL_HIST_MAX 128
2672
2673 struct dp_msdu_done_fail_entry {
2674 qdf_dma_addr_t paddr;
2675 uint32_t sw_cookie;
2676 };
2677
2678 struct dp_msdu_done_fail_history {
2679 qdf_atomic_t index;
2680 struct dp_msdu_done_fail_entry entry[DP_MSDU_DONE_FAIL_HIST_MAX];
2681 };
2682 #endif
2683
2684 #ifdef DP_RX_PEEK_MSDU_DONE_WAR
2685 #define DP_MSDU_DONE_FAIL_DESCS_MAX 64
2686
2687 struct dp_rx_msdu_done_fail_desc_list {
2688 qdf_atomic_t index;
2689 struct dp_rx_desc *msdu_done_fail_descs[DP_MSDU_DONE_FAIL_DESCS_MAX];
2690 };
2691 #endif
2692
2693 /* SOC level structure for data path */
2694 struct dp_soc {
2695 /**
2696 * re-use memory section starts
2697 */
2698
2699 /* Common base structure - Should be the first member */
2700 struct cdp_soc_t cdp_soc;
2701
2702 /* SoC Obj */
2703 struct cdp_ctrl_objmgr_psoc *ctrl_psoc;
2704
2705 /* OS device abstraction */
2706 qdf_device_t osdev;
2707
2708 /*cce disable*/
2709 bool cce_disable;
2710
2711 /* WLAN config context */
2712 struct wlan_cfg_dp_soc_ctxt *wlan_cfg_ctx;
2713
2714 /* HTT handle for host-fw interaction */
2715 struct htt_soc *htt_handle;
2716
2717 /* Common init done */
2718 qdf_atomic_t cmn_init_done;
2719
2720 /* Opaque hif handle */
2721 struct hif_opaque_softc *hif_handle;
2722
2723 /* PDEVs on this SOC */
2724 struct dp_pdev *pdev_list[MAX_PDEV_CNT];
2725
2726 /* Ring used to replenish rx buffers (maybe to the firmware or MAC) */
2727 struct dp_srng rx_refill_buf_ring[MAX_PDEV_CNT];
2728
2729 struct dp_srng rxdma_mon_desc_ring[MAX_NUM_LMAC_HW];
2730
2731 /* RXDMA error destination ring */
2732 struct dp_srng rxdma_err_dst_ring[MAX_NUM_LMAC_HW];
2733
2734 /* RXDMA monitor buffer replenish ring */
2735 struct dp_srng rxdma_mon_buf_ring[MAX_NUM_LMAC_HW];
2736
2737 /* RXDMA monitor destination ring */
2738 struct dp_srng rxdma_mon_dst_ring[MAX_NUM_LMAC_HW];
2739
2740 /* RXDMA monitor status ring. TBD: Check format of this ring */
2741 struct dp_srng rxdma_mon_status_ring[MAX_NUM_LMAC_HW];
2742
2743 /* Ring to handover links to hw in monitor mode for SOFTUMAC arch */
2744 struct dp_srng sw2rxdma_link_ring[MAX_NUM_LMAC_HW];
2745
2746 /* Number of PDEVs */
2747 uint8_t pdev_count;
2748
2749 /* AST override support in HW */
2750 bool ast_override_support;
2751
2752 /* number of HW DSCP-TID maps */
2753 uint8_t num_hw_dscp_tid_map;
2754
2755 /* HAL SOC handle */
2756 hal_soc_handle_t hal_soc;
2757
2758 /* rx monitor pkt tlv size */
2759 uint16_t rx_mon_pkt_tlv_size;
2760 /* rx pkt tlv size */
2761 uint16_t rx_pkt_tlv_size;
2762 /* rx pkt tlv size in current operation mode */
2763 uint16_t curr_rx_pkt_tlv_size;
2764
2765 /* enable/disable dp debug logs */
2766 bool dp_debug_log_en;
2767
2768 struct dp_arch_ops arch_ops;
2769
2770 /* Device ID coming from Bus sub-system */
2771 uint32_t device_id;
2772
2773 /* Link descriptor pages */
2774 struct qdf_mem_multi_page_t link_desc_pages;
2775
2776 /* total link descriptors for regular RX and TX */
2777 uint32_t total_link_descs;
2778
2779 /* Link descriptor Idle list for HW internal use (SRNG mode) */
2780 struct dp_srng wbm_idle_link_ring;
2781
2782 /* Link descriptor Idle list for HW internal use (scatter buffer mode)
2783 */
2784 qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
2785 void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
2786 uint32_t num_scatter_bufs;
2787
2788 /* Tx SW descriptor pool */
2789 struct dp_tx_desc_pool_s tx_desc[MAX_TXDESC_POOLS];
2790
2791 /* Tx MSDU Extension descriptor pool */
2792 struct dp_tx_ext_desc_pool_s tx_ext_desc[MAX_TXDESC_POOLS];
2793
2794 /* Tx TSO descriptor pool */
2795 struct dp_tx_tso_seg_pool_s tx_tso_desc[MAX_TXDESC_POOLS];
2796
2797 /* Tx TSO Num of segments pool */
2798 struct dp_tx_tso_num_seg_pool_s tx_tso_num_seg[MAX_TXDESC_POOLS];
2799
2800 /* REO destination rings */
2801 struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
2802
2803 /* REO exception ring - See if should combine this with reo_dest_ring */
2804 struct dp_srng reo_exception_ring;
2805
2806 /* REO reinjection ring */
2807 struct dp_srng reo_reinject_ring;
2808
2809 /* REO command ring */
2810 struct dp_srng reo_cmd_ring;
2811
2812 /* REO command status ring */
2813 struct dp_srng reo_status_ring;
2814
2815 /* WBM Rx release ring */
2816 struct dp_srng rx_rel_ring;
2817
2818 /* TCL data ring */
2819 struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
2820
2821 /* Number of Tx comp rings */
2822 uint8_t num_tx_comp_rings;
2823
2824 /* Number of TCL data rings */
2825 uint8_t num_tcl_data_rings;
2826
2827 /* TCL CMD_CREDIT ring */
2828 bool init_tcl_cmd_cred_ring;
2829
2830 /* Used as a credit-based ring on QCN9000, else as a command ring */
2831 struct dp_srng tcl_cmd_credit_ring;
2832
2833 /* TCL command status ring */
2834 struct dp_srng tcl_status_ring;
2835
2836 /* WBM Tx completion rings */
2837 struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
2838
2839 /* Common WBM link descriptor release ring (SW to WBM) */
2840 struct dp_srng wbm_desc_rel_ring;
2841
2842 /* DP Interrupts */
2843 struct dp_intr intr_ctx[WLAN_CFG_INT_NUM_CONTEXTS];
2844
2845 /* Monitor mode mac id to dp_intr_id map */
2846 int mon_intr_id_lmac_map[MAX_NUM_LMAC_HW];
2847 /* Rx SW descriptor pool for RXDMA monitor buffer */
2848 struct rx_desc_pool rx_desc_mon[MAX_RXDESC_POOLS];
2849
2850 /* Rx SW descriptor pool for RXDMA status buffer */
2851 struct rx_desc_pool rx_desc_status[MAX_RXDESC_POOLS];
2852
2853 /* Rx SW descriptor pool for RXDMA buffer */
2854 struct rx_desc_pool rx_desc_buf[MAX_RXDESC_POOLS];
2855
2856 /* Number of REO destination rings */
2857 uint8_t num_reo_dest_rings;
2858
2859 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
2860 /* lock to control access to soc TX descriptors */
2861 qdf_spinlock_t flow_pool_array_lock;
2862
2863 /* pause callback to pause TX queues as per flow control */
2864 tx_pause_callback pause_cb;
2865
2866 /* flow pool related statistics */
2867 struct dp_txrx_pool_stats pool_stats;
2868 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
2869
2870 notify_pre_reset_fw_callback notify_fw_callback;
2871
2872 unsigned long service_rings_running;
2873
2874 uint32_t wbm_idle_scatter_buf_size;
2875
2876 /* VDEVs on this SOC */
2877 struct dp_vdev *vdev_id_map[MAX_VDEV_CNT];
2878
2879 uint8_t hw_txrx_stats_en:1;
2880
2881 /* Tx H/W queues lock */
2882 qdf_spinlock_t tx_queue_lock[MAX_TX_HW_QUEUES];
2883
2884 /* Tx ring map for interrupt processing */
2885 uint8_t tx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2886
2887 /* Rx ring map for interrupt processing */
2888 uint8_t rx_ring_map[WLAN_CFG_INT_NUM_CONTEXTS];
2889
2890 /* peer ID to peer object map (array of pointers to peer objects) */
2891 struct dp_peer **peer_id_to_obj_map;
2892
2893 struct {
2894 unsigned mask;
2895 unsigned idx_bits;
2896 TAILQ_HEAD(, dp_peer) * bins;
2897 } peer_hash;
2898
2899 /* rx defrag state - TBD: do we need this per radio? */
2900 struct {
2901 struct {
2902 TAILQ_HEAD(, dp_rx_tid_defrag) waitlist;
2903 uint32_t timeout_ms;
2904 uint32_t next_flush_ms;
2905 qdf_spinlock_t defrag_lock;
2906 } defrag;
2907 struct {
2908 int defrag_timeout_check;
2909 int dup_check;
2910 } flags;
2911 TAILQ_HEAD(, dp_reo_cmd_info) reo_cmd_list;
2912 qdf_spinlock_t reo_cmd_lock;
2913 } rx;
2914
2915 /* optional rx processing function */
2916 void (*rx_opt_proc)(
2917 struct dp_vdev *vdev,
2918 struct dp_peer *peer,
2919 unsigned tid,
2920 qdf_nbuf_t msdu_list);
2921
2922 /* pool addr for mcast enhance buff */
2923 struct {
2924 int size;
2925 uint32_t paddr;
2926 uint32_t *vaddr;
2927 struct dp_tx_me_buf_t *freelist;
2928 int buf_in_use;
2929 qdf_dma_mem_context(memctx);
2930 } me_buf;
2931
2932 /* Protect peer hash table */
2933 DP_MUTEX_TYPE peer_hash_lock;
2934 /* Protect peer_id_to_objmap */
2935 DP_MUTEX_TYPE peer_map_lock;
2936
2937 /* maximum number of supported peers */
2938 uint32_t max_peers;
2939 /* maximum value for peer_id */
2940 uint32_t max_peer_id;
2941
2942 #ifdef DP_USE_REDUCED_PEER_ID_FIELD_WIDTH
2943 uint32_t peer_id_shift;
2944 uint32_t peer_id_mask;
2945 #endif
2946
2947 /* rx peer metadata field shift and mask configuration */
2948 uint8_t htt_peer_id_s;
2949 uint32_t htt_peer_id_m;
2950 uint8_t htt_vdev_id_s;
2951 uint32_t htt_vdev_id_m;
2952 uint8_t htt_mld_peer_valid_s;
2953 uint32_t htt_mld_peer_valid_m;
2954 /* rx peer metadata version */
2955 uint8_t rx_peer_metadata_ver;
2956
2957 /* SoC level data path statistics */
2958 struct dp_soc_stats stats;
2959 #ifdef WLAN_SYSFS_DP_STATS
2960 /* sysfs config for DP stats */
2961 struct sysfs_stats_config *sysfs_config;
2962 #endif
2963 /* timestamp to keep track of msdu buffers received on reo err ring */
2964 uint64_t rx_route_err_start_pkt_ts;
2965
2966 /* Num RX Route err in a given window to keep track of rate of errors */
2967 uint32_t rx_route_err_in_window;
2968
2969 /* Enable processing of Tx completion status words */
2970 bool process_tx_status;
2971 bool process_rx_status;
2972 struct dp_ast_entry **ast_table;
2973 struct {
2974 unsigned mask;
2975 unsigned idx_bits;
2976 TAILQ_HEAD(, dp_ast_entry) * bins;
2977 } ast_hash;
2978
2979 #ifdef DP_TX_HW_DESC_HISTORY
2980 struct dp_tx_hw_desc_history tx_hw_desc_history;
2981 #endif
2982
2983 #ifdef WLAN_FEATURE_DP_RX_RING_HISTORY
2984 struct dp_rx_history *rx_ring_history[MAX_REO_DEST_RINGS];
2985 struct dp_rx_refill_history *rx_refill_ring_history[MAX_PDEV_CNT];
2986 struct dp_rx_err_history *rx_err_ring_history;
2987 struct dp_rx_reinject_history *rx_reinject_ring_history;
2988 #endif
2989
2990 #ifdef WLAN_FEATURE_DP_MON_STATUS_RING_HISTORY
2991 struct dp_mon_status_ring_history *mon_status_ring_history;
2992 #endif
2993
2994 #ifdef WLAN_FEATURE_DP_TX_DESC_HISTORY
2995 struct dp_tx_tcl_history tx_tcl_history;
2996 struct dp_tx_comp_history tx_comp_history;
2997 #endif
2998
2999 #ifdef WLAN_FEATURE_DP_CFG_EVENT_HISTORY
3000 struct dp_cfg_event_history cfg_event_history;
3001 #endif
3002
3003 qdf_spinlock_t ast_lock;
3004 /* Timer for AST entry ageout maintenance */
3005 qdf_timer_t ast_aging_timer;
3006
3007 /* Timer counter for WDS AST entry ageout */
3008 uint8_t wds_ast_aging_timer_cnt;
3009 bool pending_ageout;
3010 bool ast_offload_support;
3011 bool host_ast_db_enable;
3012 uint32_t max_ast_ageout_count;
3013 uint8_t eapol_over_control_port;
3014
3015 uint8_t sta_mode_search_policy;
3016 qdf_timer_t lmac_reap_timer;
3017 uint8_t lmac_timer_init;
3018 qdf_timer_t int_timer;
3019 uint8_t intr_mode;
3020 uint8_t lmac_polled_mode;
3021
3022 qdf_list_t reo_desc_freelist;
3023 qdf_spinlock_t reo_desc_freelist_lock;
3024
3025 /* htt stats */
3026 struct htt_t2h_stats htt_stats;
3027
3028 void *external_txrx_handle; /* External data path handle */
3029 qdf_atomic_t ipa_map_allowed;
3030 #ifdef IPA_OFFLOAD
3031 struct ipa_dp_tx_rsc ipa_uc_tx_rsc;
3032 #ifdef IPA_WDI3_TX_TWO_PIPES
3033 /* Resources for the alternative IPA TX pipe */
3034 struct ipa_dp_tx_rsc ipa_uc_tx_rsc_alt;
3035 #endif
3036
3037 struct ipa_dp_rx_rsc ipa_uc_rx_rsc;
3038 #ifdef IPA_WDI3_VLAN_SUPPORT
3039 struct ipa_dp_rx_rsc ipa_uc_rx_rsc_alt;
3040 #endif
3041 qdf_atomic_t ipa_pipes_enabled;
3042 bool ipa_first_tx_db_access;
3043 qdf_spinlock_t ipa_rx_buf_map_lock;
3044 bool ipa_rx_buf_map_lock_initialized;
3045 uint8_t ipa_reo_ctx_lock_required[MAX_REO_DEST_RINGS];
3046 #endif
3047
3048 #ifdef WLAN_FEATURE_STATS_EXT
3049 struct {
3050 uint32_t rx_mpdu_received;
3051 uint32_t rx_mpdu_missed;
3052 } ext_stats;
3053 qdf_event_t rx_hw_stats_event;
3054 qdf_spinlock_t rx_hw_stats_lock;
3055 bool is_last_stats_ctx_init;
3056 struct dp_req_rx_hw_stats_t *rx_hw_stats;
3057 #endif /* WLAN_FEATURE_STATS_EXT */
3058
3059 /* Indicates HTT map/unmap versions */
3060 uint8_t peer_map_unmap_versions;
3061 /* Per peer per Tid ba window size support */
3062 uint8_t per_tid_basize_max_tid;
3063 /* Soc level flag to enable da_war */
3064 uint8_t da_war_enabled;
3065 /* number of active ast entries */
3066 uint32_t num_ast_entries;
3067 /* peer extended rate statistics context at soc level */
3068 struct cdp_soc_rate_stats_ctx *rate_stats_ctx;
3069 /* peer extended rate statistics control flag */
3070 bool peerstats_enabled;
3071
3072 /* 8021p PCP-TID map values */
3073 uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
3074 /* TID map priority value */
3075 uint8_t tidmap_prty;
3076 /* Pointer to global per ring type specific configuration table */
3077 struct wlan_srng_cfg *wlan_srng_cfg;
3078 /* Num Tx outstanding on device */
3079 qdf_atomic_t num_tx_outstanding;
3080 /* Num Tx exception on device */
3081 qdf_atomic_t num_tx_exception;
3082 /* Num Tx allowed */
3083 uint32_t num_tx_allowed;
3084 /* Num Regular Tx allowed */
3085 uint32_t num_reg_tx_allowed;
3086 /* Num Tx allowed for special frames*/
3087 uint32_t num_tx_spl_allowed;
3088 /* Preferred HW mode */
3089 uint8_t preferred_hw_mode;
3090
3091 /**
3092 * Flag to indicate whether WAR to address single cache entry
3093 * invalidation bug is enabled or not
3094 */
3095 bool is_rx_fse_full_cache_invalidate_war_enabled;
3096 #if defined(WLAN_SUPPORT_RX_FLOW_TAG)
3097 /**
3098 * Pointer to DP RX Flow FST at SOC level if
3099 * is_rx_flow_search_table_per_pdev is false
3100 * TBD: rx_fst[num_macs] if we decide to have per mac FST
3101 */
3102 struct dp_rx_fst *rx_fst;
3103 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
3104 /* SG supported for msdu continued packets from wbm release ring */
3105 bool wbm_release_desc_rx_sg_support;
3106 bool peer_map_attach_success;
3107 /* Flag to disable mac1 ring interrupts */
3108 bool disable_mac1_intr;
3109 /* Flag to disable mac2 ring interrupts */
3110 bool disable_mac2_intr;
3111
3112 struct {
3113 /* 1st msdu in sg for msdu continued packets in wbm rel ring */
3114 bool wbm_is_first_msdu_in_sg;
3115 /* Wbm sg list head */
3116 qdf_nbuf_t wbm_sg_nbuf_head;
3117 /* Wbm sg list tail */
3118 qdf_nbuf_t wbm_sg_nbuf_tail;
3119 uint32_t wbm_sg_desc_msdu_len;
3120 } wbm_sg_param;
3121 /* Number of msdu exception descriptors */
3122 uint32_t num_msdu_exception_desc;
3123
3124 /* RX buffer params */
3125 struct rx_buff_pool rx_buff_pool[MAX_PDEV_CNT];
3126 struct rx_refill_buff_pool rx_refill_buff_pool;
3127 /* Save recent operation related variable */
3128 struct dp_last_op_info last_op_info;
3129 TAILQ_HEAD(, dp_peer) inactive_peer_list;
3130 qdf_spinlock_t inactive_peer_list_lock;
3131 TAILQ_HEAD(, dp_vdev) inactive_vdev_list;
3132 qdf_spinlock_t inactive_vdev_list_lock;
3133 /* lock to protect vdev_id_map table */
3134 qdf_spinlock_t vdev_map_lock;
3135
3136 /* Flow Search Table is in CMEM */
3137 bool fst_in_cmem;
3138
3139 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
3140 struct dp_swlm swlm;
3141 #endif
3142
3143 #ifdef FEATURE_RUNTIME_PM
3144 /* DP Rx timestamp */
3145 qdf_time_t rx_last_busy;
3146 /* Dp runtime refcount */
3147 qdf_atomic_t dp_runtime_refcount;
3148 /* Dp tx pending count in RTPM */
3149 qdf_atomic_t tx_pending_rtpm;
3150 #endif
3151 /* Invalid buffers that were allocated as RX buffers */
3152 qdf_nbuf_queue_t invalid_buf_queue;
3153
3154 #ifdef FEATURE_MEC
3155 /** @mec_lock: spinlock for MEC table */
3156 qdf_spinlock_t mec_lock;
3157 /** @mec_cnt: number of active mec entries */
3158 qdf_atomic_t mec_cnt;
3159 struct {
3160 /** @mask: mask bits */
3161 uint32_t mask;
3162 /** @idx_bits: index to shift bits */
3163 uint32_t idx_bits;
3164 /** @bins: MEC table */
3165 TAILQ_HEAD(, dp_mec_entry) * bins;
3166 } mec_hash;
3167 #endif
3168
3169 #ifdef WLAN_DP_FEATURE_DEFERRED_REO_QDESC_DESTROY
3170 qdf_list_t reo_desc_deferred_freelist;
3171 qdf_spinlock_t reo_desc_deferred_freelist_lock;
3172 bool reo_desc_deferred_freelist_init;
3173 #endif
3174 /* BM id for first WBM2SW ring */
3175 uint32_t wbm_sw0_bm_id;
3176
3177 /* Store arch_id from device_id */
3178 uint16_t arch_id;
3179
3180 /* link desc ID start per device type */
3181 uint32_t link_desc_id_start;
3182
3183 /* CMEM buffer target reserved for host usage */
3184 uint64_t cmem_base;
3185 /* CMEM size in bytes */
3186 uint64_t cmem_total_size;
3187 /* CMEM free size in bytes */
3188 uint64_t cmem_avail_size;
3189
3190 /* SOC level feature flags */
3191 struct dp_soc_features features;
3192
3193 #ifdef WIFI_MONITOR_SUPPORT
3194 struct dp_mon_soc *monitor_soc;
3195 #endif
3196 uint8_t rxdma2sw_rings_not_supported:1,
3197 wbm_sg_last_msdu_war:1,
3198 mec_fw_offload:1,
3199 multi_peer_grp_cmd_supported:1,
3200 umac_reset_supported:1;
3201
3202 /* Number of Rx refill rings */
3203 uint8_t num_rx_refill_buf_rings;
3204 #ifdef FEATURE_RUNTIME_PM
3205 /* flag to indicate vote for runtime_pm for high tput case */
3206 qdf_atomic_t rtpm_high_tput_flag;
3207 #endif
3208 /* Buffer manager ID for idle link descs */
3209 uint8_t idle_link_bm_id;
3210 qdf_atomic_t ref_count;
3211
3212 unsigned long vdev_stats_id_map;
3213 bool txmon_hw_support;
3214
3215 #ifdef DP_UMAC_HW_RESET_SUPPORT
3216 struct dp_soc_umac_reset_ctx umac_reset_ctx;
3217 #endif
3218 /* PPDU to link_id mapping parameters */
3219 uint8_t link_id_offset;
3220 uint8_t link_id_bits;
3221 #ifdef FEATURE_RX_LINKSPEED_ROAM_TRIGGER
3222 /* A flag used to decide the switching of rx link speed */
3223 bool high_throughput;
3224 #endif
3225 bool is_tx_pause;
3226
3227 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3228 /* number of IPv4 flows inserted */
3229 qdf_atomic_t ipv4_fse_cnt;
3230 /* number of IPv6 flows inserted */
3231 qdf_atomic_t ipv6_fse_cnt;
3232 #endif
3233 /* Reo queue ref table items */
3234 struct reo_queue_ref_table reo_qref;
3235 #ifdef DP_TX_PACKET_INSPECT_FOR_ILP
3236 /* Flag to show if TX ILP is enabled */
3237 bool tx_ilp_enable;
3238 #endif
3239 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
3240 uint8_t mld_mode_ap;
3241 #endif
3242 struct test_qaddr_del *list_shared_qaddr_del;
3243 struct test_qaddr_del *reo_write_list;
3244 struct test_mem_free *list_qdesc_addr_free;
3245 struct test_mem_free *list_qdesc_addr_alloc;
3246 uint64_t free_addr_list_idx;
3247 uint64_t alloc_addr_list_idx;
3248 uint64_t shared_qaddr_del_idx;
3249 uint64_t write_paddr_list_idx;
3250
3251 #ifdef WLAN_FEATURE_TX_LATENCY_STATS
3252 /* callback function for tx latency stats */
3253 cdp_tx_latency_cb tx_latency_cb;
3254 #endif
3255
3256 #ifdef DP_TX_COMP_RING_DESC_SANITY_CHECK
3257 struct {
3258 uint32_t detected;
3259 uint64_t start_time;
3260 } stale_entry[MAX_TCL_DATA_RINGS];
3261 #endif
3262 #ifdef DP_RX_MSDU_DONE_FAIL_HISTORY
3263 struct dp_msdu_done_fail_history *msdu_done_fail_hist;
3264 #endif
3265 #ifdef DP_RX_PEEK_MSDU_DONE_WAR
3266 struct dp_rx_msdu_done_fail_desc_list msdu_done_fail_desc_list;
3267 #endif
3268 /* monitor interface flags */
3269 uint32_t mon_flags;
3270 };
3271
3272 #ifdef IPA_OFFLOAD
3273 /**
3274 * struct dp_ipa_resources - Resources needed for IPA
3275 * @tx_ring:
3276 * @tx_num_alloc_buffer:
3277 * @tx_comp_ring:
3278 * @rx_rdy_ring:
3279 * @rx_refill_ring:
3280 * @tx_comp_doorbell_paddr: IPA UC doorbell registers paddr
3281 * @tx_comp_doorbell_vaddr:
3282 * @rx_ready_doorbell_paddr:
3283 * @is_db_ddr_mapped:
3284 * @tx_alt_ring:
3285 * @tx_alt_ring_num_alloc_buffer:
3286 * @tx_alt_comp_ring:
3287 * @tx_alt_comp_doorbell_paddr: IPA UC doorbell registers paddr
3288 * @tx_alt_comp_doorbell_vaddr:
3289 * @rx_alt_rdy_ring:
3290 * @rx_alt_refill_ring:
3291 * @rx_alt_ready_doorbell_paddr:
3292 */
3293 struct dp_ipa_resources {
3294 qdf_shared_mem_t tx_ring;
3295 uint32_t tx_num_alloc_buffer;
3296
3297 qdf_shared_mem_t tx_comp_ring;
3298 qdf_shared_mem_t rx_rdy_ring;
3299 qdf_shared_mem_t rx_refill_ring;
3300
3301 /* IPA UC doorbell registers paddr */
3302 qdf_dma_addr_t tx_comp_doorbell_paddr;
3303 uint32_t *tx_comp_doorbell_vaddr;
3304 qdf_dma_addr_t rx_ready_doorbell_paddr;
3305
3306 bool is_db_ddr_mapped;
3307
3308 #ifdef IPA_WDI3_TX_TWO_PIPES
3309 qdf_shared_mem_t tx_alt_ring;
3310 uint32_t tx_alt_ring_num_alloc_buffer;
3311 qdf_shared_mem_t tx_alt_comp_ring;
3312
3313 /* IPA UC doorbell registers paddr */
3314 qdf_dma_addr_t tx_alt_comp_doorbell_paddr;
3315 uint32_t *tx_alt_comp_doorbell_vaddr;
3316 #endif
3317 #ifdef IPA_WDI3_VLAN_SUPPORT
3318 qdf_shared_mem_t rx_alt_rdy_ring;
3319 qdf_shared_mem_t rx_alt_refill_ring;
3320 qdf_dma_addr_t rx_alt_ready_doorbell_paddr;
3321 #endif
3322 };
3323 #endif
3324
3325 #define MAX_RX_MAC_RINGS 2
3326 /* Same as NAC_MAX_CLIENT */
3327 #define DP_NAC_MAX_CLIENT 24
3328
3329 /*
3330 * 24 bits cookie size
3331 * 10 bits page id 0 ~ 1023 for MCL
3332 * 3 bits page id 0 ~ 7 for WIN
3333 * WBM Idle List Desc size = 128,
3334 * Num descs per page = 4096/128 = 32 for MCL
3335 * Num descs per page = 2MB/128 = 16384 for WIN
3336 */
3337 /*
3338 * Macros to setup link descriptor cookies - for link descriptors, we just
3339 * need the first 3 bits to store the bank/page ID for WIN. The
3340 * remaining bits will be used to set a unique ID, which will
3341 * be useful in debugging
3342 */
3343 #ifdef MAX_ALLOC_PAGE_SIZE
3344 #if PAGE_SIZE == 4096
3345 #define LINK_DESC_PAGE_ID_MASK 0x007FE0
3346 #define LINK_DESC_ID_SHIFT 5
3347 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
3348 #elif PAGE_SIZE == 65536
3349 #define LINK_DESC_PAGE_ID_MASK 0x007E00
3350 #define LINK_DESC_ID_SHIFT 9
3351 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x800
3352 #else
3353 #error "Unsupported kernel PAGE_SIZE"
3354 #endif
3355 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
3356 ((((_page_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_desc_id))
3357 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
3358 (((_cookie) & LINK_DESC_PAGE_ID_MASK) >> LINK_DESC_ID_SHIFT)
3359 #else
3360 #define LINK_DESC_PAGE_ID_MASK 0x7
3361 #define LINK_DESC_ID_SHIFT 3
3362 #define LINK_DESC_COOKIE(_desc_id, _page_id, _desc_id_start) \
3363 ((((_desc_id) + (_desc_id_start)) << LINK_DESC_ID_SHIFT) | (_page_id))
3364 #define LINK_DESC_COOKIE_PAGE_ID(_cookie) \
3365 ((_cookie) & LINK_DESC_PAGE_ID_MASK)
3366 #define LINK_DESC_ID_START_21_BITS_COOKIE 0x8000
3367 #endif
3368 #define LINK_DESC_ID_START_20_BITS_COOKIE 0x4000
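/*
 * Illustrative sketch (an assumption): for the build without
 * MAX_ALLOC_PAGE_SIZE, a link descriptor cookie keeps the page id in the low
 * LINK_DESC_ID_SHIFT bits and a (desc_id + desc_id_start) debug identifier in
 * the remaining bits:
 *
 *	uint32_t cookie = LINK_DESC_COOKIE(desc_id, page_id,
 *					   soc->link_desc_id_start);
 *	uint32_t page   = LINK_DESC_COOKIE_PAGE_ID(cookie);
 *
 * With LINK_DESC_ID_SHIFT = 3 and LINK_DESC_PAGE_ID_MASK = 0x7 this supports
 * page ids 0..7, matching the "3 bits page id for WIN" note above.
 */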
3369
3370 /* same as ieee80211_nac_param */
3371 enum dp_nac_param_cmd {
3372 /* IEEE80211_NAC_PARAM_ADD */
3373 DP_NAC_PARAM_ADD = 1,
3374 /* IEEE80211_NAC_PARAM_DEL */
3375 DP_NAC_PARAM_DEL,
3376 /* IEEE80211_NAC_PARAM_LIST */
3377 DP_NAC_PARAM_LIST,
3378 };
3379
3380 /**
3381 * struct dp_neighbour_peer - neighbour peer list type for smart mesh
3382 * @neighbour_peers_macaddr: neighbour peer's mac address
3383 * @vdev: associated vdev
3384 * @ast_entry: ast_entry for neighbour peer
3385 * @rssi: rssi value
3386 * @neighbour_peer_list_elem: neighbour peer list TAILQ element
3387 */
3388 struct dp_neighbour_peer {
3389 union dp_align_mac_addr neighbour_peers_macaddr;
3390 struct dp_vdev *vdev;
3391 struct dp_ast_entry *ast_entry;
3392 uint8_t rssi;
3393 TAILQ_ENTRY(dp_neighbour_peer) neighbour_peer_list_elem;
3394 };
3395
3396 #ifdef WLAN_TX_PKT_CAPTURE_ENH
3397 #define WLAN_TX_PKT_CAPTURE_ENH 1
3398 #define DP_TX_PPDU_PROC_THRESHOLD 8
3399 #define DP_TX_PPDU_PROC_TIMEOUT 10
3400 #endif
3401
3402 /**
3403 * struct ppdu_info - PPDU Status info descriptor
3404 * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
3405 * @sched_cmdid: schedule command id, which will be same in a burst
3406 * @max_ppdu_id: wrap around for ppdu id
3407 * @tsf_l32:
3408 * @tlv_bitmap:
3409 * @last_tlv_cnt: Keep track of missing ppdu tlvs
3410 * @last_user: last ppdu processed for user
3411 * @is_ampdu: set if Ampdu aggregate
3412 * @nbuf: ppdu descriptor payload
3413 * @ppdu_desc: ppdu descriptor
3414 * @ulist: Union of lists
3415 * @ppdu_info_dlist_elem: linked list of ppdu tlvs
3416 * @ppdu_info_slist_elem: Singly linked list (queue) of ppdu tlvs
3417 * @ppdu_info_list_elem: linked list of ppdu tlvs
3418 * @ppdu_info_queue_elem: Singly linked list (queue) of ppdu tlvs
3419 * @compltn_common_tlv: Successful tlv counter from COMPLTN COMMON tlv
3420 * @ack_ba_tlv: Successful tlv counter from ACK BA tlv
3421 * @done:
3422 */
3423 struct ppdu_info {
3424 uint32_t ppdu_id;
3425 uint32_t sched_cmdid;
3426 uint32_t max_ppdu_id;
3427 uint32_t tsf_l32;
3428 uint16_t tlv_bitmap;
3429 uint16_t last_tlv_cnt;
3430 uint16_t last_user:8,
3431 is_ampdu:1;
3432 qdf_nbuf_t nbuf;
3433 struct cdp_tx_completion_ppdu *ppdu_desc;
3434 #ifdef WLAN_TX_PKT_CAPTURE_ENH
3435 union {
3436 TAILQ_ENTRY(ppdu_info) ppdu_info_dlist_elem;
3437 STAILQ_ENTRY(ppdu_info) ppdu_info_slist_elem;
3438 } ulist;
3439 #define ppdu_info_list_elem ulist.ppdu_info_dlist_elem
3440 #define ppdu_info_queue_elem ulist.ppdu_info_slist_elem
3441 #else
3442 TAILQ_ENTRY(ppdu_info) ppdu_info_list_elem;
3443 #endif
3444 uint8_t compltn_common_tlv;
3445 uint8_t ack_ba_tlv;
3446 bool done;
3447 };
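
/*
 * Illustrative sketch only: because ppdu_info_list_elem resolves to a
 * TAILQ_ENTRY in both builds (directly, or via the ulist union aliases
 * above when WLAN_TX_PKT_CAPTURE_ENH is defined), the same TAILQ code can
 * queue a ppdu_info node in either configuration. The dp_example_* names
 * are hypothetical.
 */
TAILQ_HEAD(dp_example_ppdu_info_list, ppdu_info);

static inline void
dp_example_ppdu_info_enqueue(struct dp_example_ppdu_info_list *head,
			     struct ppdu_info *ppdu_info)
{
	/* Works for both builds thanks to the ppdu_info_list_elem alias */
	TAILQ_INSERT_TAIL(head, ppdu_info, ppdu_info_list_elem);
}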
3448
3449 /**
3450 * struct msdu_completion_info - wbm msdu completion info
3451 * @ppdu_id: Unique ppduid assigned by firmware for every tx packet
3452 * @peer_id: peer_id
3453  * @tid: tid used during transmit
3454 * @first_msdu: first msdu indication
3455 * @last_msdu: last msdu indication
3456 * @msdu_part_of_amsdu: msdu part of amsdu
3457 * @transmit_cnt: retried count
3458 * @status: transmit status
3459  * @tsf: timestamp at which the msdu was transmitted
3460 */
3461 struct msdu_completion_info {
3462 uint32_t ppdu_id;
3463 uint16_t peer_id;
3464 uint8_t tid;
3465 uint8_t first_msdu:1,
3466 last_msdu:1,
3467 msdu_part_of_amsdu:1;
3468 uint8_t transmit_cnt;
3469 uint8_t status;
3470 uint32_t tsf;
3471 };
3472
3473 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3474 struct rx_protocol_tag_map {
3475 /* User configured tag for the given protocol type */
3476 uint16_t tag;
3477 };
3478
3479 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3480 /**
3481 * struct rx_protocol_tag_stats - protocol statistics
3482 * @tag_ctr: number of rx msdus matching this tag
3483 */
3484 struct rx_protocol_tag_stats {
3485 uint32_t tag_ctr;
3486 };
3487 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3488
3489 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
3490
3491 #ifdef WLAN_RX_PKT_CAPTURE_ENH
3492 /* Template data to be set for Enhanced RX Monitor packets */
3493 #define RX_MON_CAP_ENH_TRAILER 0xdeadc0dedeadda7a
3494
3495 /**
3496 * struct dp_rx_mon_enh_trailer_data - Data structure to set a known pattern
3497 * at end of each MSDU in monitor-lite mode
3498 * @reserved1: reserved for future use
3499 * @reserved2: reserved for future use
3500 * @flow_tag: flow tag value read from skb->cb
3501 * @protocol_tag: protocol tag value read from skb->cb
3502 */
3503 struct dp_rx_mon_enh_trailer_data {
3504 uint16_t reserved1;
3505 uint16_t reserved2;
3506 uint16_t flow_tag;
3507 uint16_t protocol_tag;
3508 };
3509 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
3510
3511 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3512 /* Number of debugfs entries created for HTT stats */
3513 #define PDEV_HTT_STATS_DBGFS_SIZE HTT_DBG_NUM_EXT_STATS
3514
3515 /**
3516 * struct pdev_htt_stats_dbgfs_priv - Structure to maintain debugfs information
3517 * of HTT stats
3518 * @pdev: dp pdev of debugfs entry
3519 * @stats_id: stats id of debugfs entry
3520 */
3521 struct pdev_htt_stats_dbgfs_priv {
3522 struct dp_pdev *pdev;
3523 uint16_t stats_id;
3524 };
3525
3526 /**
3527 * struct pdev_htt_stats_dbgfs_cfg - PDEV level data structure for debugfs
3528 * support for HTT stats
3529 * @debugfs_entry: qdf_debugfs directory entry
3530 * @m: qdf debugfs file handler
3531 * @pdev_htt_stats_dbgfs_ops: File operations of entry created
3532 * @priv: HTT stats debugfs private object
3533 * @htt_stats_dbgfs_event: HTT stats event for debugfs support
3534 * @lock: HTT stats debugfs lock
3535 * @htt_stats_dbgfs_msg_process: Function callback to print HTT stats
3536 */
3537 struct pdev_htt_stats_dbgfs_cfg {
3538 qdf_dentry_t debugfs_entry[PDEV_HTT_STATS_DBGFS_SIZE];
3539 qdf_debugfs_file_t m;
3540 struct qdf_debugfs_fops
3541 pdev_htt_stats_dbgfs_ops[PDEV_HTT_STATS_DBGFS_SIZE - 1];
3542 struct pdev_htt_stats_dbgfs_priv priv[PDEV_HTT_STATS_DBGFS_SIZE - 1];
3543 qdf_event_t htt_stats_dbgfs_event;
3544 qdf_mutex_t lock;
3545 void (*htt_stats_dbgfs_msg_process)(void *data, A_INT32 len);
3546 };
3547 #endif /* HTT_STATS_DEBUGFS_SUPPORT */
3548
3549 struct dp_srng_ring_state {
3550 enum hal_ring_type ring_type;
3551 uint32_t sw_head;
3552 uint32_t sw_tail;
3553 uint32_t hw_head;
3554 uint32_t hw_tail;
3555
3556 };
3557
3558 struct dp_soc_srngs_state {
3559 uint32_t seq_num;
3560 uint32_t max_ring_id;
3561 struct dp_srng_ring_state ring_state[DP_MAX_SRNGS];
3562 TAILQ_ENTRY(dp_soc_srngs_state) list_elem;
3563 };
3564
3565 #ifdef WLAN_FEATURE_11BE_MLO
/**
3566  * struct dp_mlo_sync_timestamp - PDEV level data structure for storing
3567  * MLO timestamp received via HTT msg.
3568  * @msg_type: This would be set to HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND
3569  * @pdev_id: pdev_id
3570  * @chip_id: chip_id
3571  * @mac_clk_freq: mac clock frequency of the mac HW block in MHz
3572  * @sync_tstmp_lo_us: lower 32 bits of the WLAN global time stamp (in us) at
3573  * which last sync interrupt was received
3574  * @sync_tstmp_hi_us: upper 32 bits of the WLAN global time stamp (in us) at
3575  * which last sync interrupt was received
3576  * @mlo_offset_lo_us: lower 32 bits of the MLO time stamp offset in us
3577  * @mlo_offset_hi_us: upper 32 bits of the MLO time stamp offset in us
3578  * @mlo_offset_clks: MLO time stamp offset in clock ticks for sub us
3579  * @mlo_comp_us: MLO time stamp compensation applied in us
3580  * @mlo_comp_clks: MLO time stamp compensation applied in clock ticks
3581  * for sub us resolution
3582  * @mlo_comp_timer: period of MLO compensation timer at which compensation
3583  * is applied, in us
3584  */
3585 struct dp_mlo_sync_timestamp {
3586 uint32_t msg_type:8,
3587 pdev_id:2,
3588 chip_id:2,
3589 rsvd1:4,
3590 mac_clk_freq:16;
3591 uint32_t sync_tstmp_lo_us;
3592 uint32_t sync_tstmp_hi_us;
3593 uint32_t mlo_offset_lo_us;
3594 uint32_t mlo_offset_hi_us;
3595 uint32_t mlo_offset_clks;
3596 uint32_t mlo_comp_us:16,
3597 mlo_comp_clks:10,
3598 rsvd2:6;
3599 uint32_t mlo_comp_timer:22,
3600 rsvd3:10;
3601 };
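
/*
 * Illustrative sketch only: combining the hi/lo 32-bit halves carried in
 * struct dp_mlo_sync_timestamp into 64-bit values. The dp_example_* names
 * are hypothetical and not part of the driver.
 */
static inline uint64_t
dp_example_mlo_sync_tstamp_us(struct dp_mlo_sync_timestamp *ts)
{
	/* WLAN global timestamp (us) at the last sync interrupt */
	return ((uint64_t)ts->sync_tstmp_hi_us << 32) | ts->sync_tstmp_lo_us;
}

static inline uint64_t
dp_example_mlo_offset_us(struct dp_mlo_sync_timestamp *ts)
{
	/* MLO timestamp offset (us); the sub-us part is in mlo_offset_clks */
	return ((uint64_t)ts->mlo_offset_hi_us << 32) | ts->mlo_offset_lo_us;
}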
3602 #endif
3603
3604 /* PDEV level structure for data path */
3605 struct dp_pdev {
3606 /**
3607 * Re-use Memory Section Starts
3608 */
3609
3610 /* PDEV Id */
3611 uint8_t pdev_id;
3612
3613 /* LMAC Id */
3614 uint8_t lmac_id;
3615
3616 /* Target pdev Id */
3617 uint8_t target_pdev_id;
3618
3619 bool pdev_deinit;
3620
3621 /* TXRX SOC handle */
3622 struct dp_soc *soc;
3623
3624 /* pdev status down or up required to handle dynamic hw
3625 * mode switch between DBS and DBS_SBS.
3626 * 1 = down
3627 * 0 = up
3628 */
3629 bool is_pdev_down;
3630
3631 /* Enhanced Stats is enabled */
3632 uint8_t enhanced_stats_en:1,
3633 link_peer_stats:1;
3634
3635 /* Fast path Tx flags */
3636 uint32_t tx_fast_flag;
3637
3638 /* Flag to indicate fast RX */
3639 bool rx_fast_flag;
3640
3641 /* Second ring used to replenish rx buffers */
3642 struct dp_srng rx_refill_buf_ring2;
3643 #ifdef IPA_WDI3_VLAN_SUPPORT
3644 /* Third ring used to replenish rx buffers */
3645 struct dp_srng rx_refill_buf_ring3;
3646 #endif
3647
3648 #ifdef FEATURE_DIRECT_LINK
3649 /* Fourth ring used to replenish rx buffers */
3650 struct dp_srng rx_refill_buf_ring4;
3651 #endif
3652
3653 /* Empty ring used by firmware to post rx buffers to the MAC */
3654 struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
3655
3656 int ch_band_lmac_id_mapping[REG_BAND_UNKNOWN];
3657
3658 /* wlan_cfg pdev ctxt */
3659 struct wlan_cfg_dp_pdev_ctxt *wlan_cfg_ctx;
3660
3661 /**
3662 * TODO: See if we need a ring map here for LMAC rings.
3663 * 1. Monitor rings are currently planning to be processed on receiving
3664 * PPDU end interrupts and hence won't need ring based interrupts.
3665 * 2. Rx buffer rings will be replenished during REO destination
3666 * processing and doesn't require regular interrupt handling - we will
3667  * only handle low water mark interrupts which are not expected
3668 * frequently
3669 */
3670
3671 /* VDEV list */
3672 TAILQ_HEAD(, dp_vdev) vdev_list;
3673
3674 /* vdev list lock */
3675 qdf_spinlock_t vdev_list_lock;
3676
3677 /* Number of vdevs this device has */
3678 uint16_t vdev_count;
3679
3680 /* PDEV transmit lock */
3681 qdf_spinlock_t tx_lock;
3682
3683 /* tx_mutex for ME (multicast enhancement) */
3684 DP_MUTEX_TYPE tx_mutex;
3685
3686 /* msdu chain head & tail */
3687 qdf_nbuf_t invalid_peer_head_msdu;
3688 qdf_nbuf_t invalid_peer_tail_msdu;
3689
3690 /* Band steering */
3691 /* TBD */
3692
3693 /* PDEV level data path statistics */
3694 struct cdp_pdev_stats stats;
3695
3696 /* Global RX decap mode for the device */
3697 enum htt_pkt_type rx_decap_mode;
3698
3699 qdf_atomic_t num_tx_outstanding;
3700 int32_t tx_descs_max;
3701
3702 qdf_atomic_t num_tx_exception;
3703
3704 /* MCL specific local peer handle */
3705 struct {
3706 uint8_t pool[OL_TXRX_NUM_LOCAL_PEER_IDS + 1];
3707 uint8_t freelist;
3708 qdf_spinlock_t lock;
3709 struct dp_peer *map[OL_TXRX_NUM_LOCAL_PEER_IDS];
3710 } local_peer_ids;
3711
3712 /* DSCP to TID maps (see the lookup sketch after this structure) */
3713 uint8_t dscp_tid_map[DP_MAX_TID_MAPS][DSCP_TID_MAP_MAX];
3714
3715 /* operating channel */
3716 struct {
3717 uint8_t num;
3718 uint8_t band;
3719 uint16_t freq;
3720 } operating_channel;
3721
3722 /* pool addr for mcast enhance buff */
3723 struct {
3724 int size;
3725 uint32_t paddr;
3726 char *vaddr;
3727 struct dp_tx_me_buf_t *freelist;
3728 int buf_in_use;
3729 qdf_dma_mem_context(memctx);
3730 } me_buf;
3731
3732 bool hmmc_tid_override_en;
3733 uint8_t hmmc_tid;
3734
3735 /* Number of VAPs with mcast enhancement enabled */
3736 qdf_atomic_t mc_num_vap_attached;
3737
3738 qdf_atomic_t stats_cmd_complete;
3739
3740 #ifdef IPA_OFFLOAD
3741 ipa_uc_op_cb_type ipa_uc_op_cb;
3742 void *usr_ctxt;
3743 struct dp_ipa_resources ipa_resource;
3744 #endif
3745
3746 /* TBD */
3747
3748 /* map this pdev to a particular Reo Destination ring */
3749 enum cdp_host_reo_dest_ring reo_dest;
3750
3751 /* WDI event handlers */
3752 struct wdi_event_subscribe_t **wdi_event_list;
3753
3754 bool cfr_rcc_mode;
3755
3756 /* enable time latency check for tx completion */
3757 bool latency_capture_enable;
3758
3759 /* enable calculation of delay stats */
3760 bool delay_stats_flag;
3761 /* vow stats */
3762 bool vow_stats;
3763 void *dp_txrx_handle; /* Advanced data path handle */
3764 uint32_t ppdu_id;
3765 bool first_nbuf;
3766 /* Current noise-floor reading for the pdev channel */
3767 int16_t chan_noise_floor;
3768
3769 /*
3770 * For multiradio device, this flag indicates if
3771 * this radio is primary or secondary.
3772 *
3773 * For HK 1.0, this is used for WAR for the AST issue.
3774 * HK 1.x mandates creation of only 1 AST entry with same MAC address
3775 * across 2 radios. is_primary indicates the radio on which DP should
3776 * install HW AST entry if there is a request to add 2 AST entries
3777 * with same MAC address across 2 radios
3778 */
3779 uint8_t is_primary;
3780 struct cdp_tx_sojourn_stats sojourn_stats;
3781 qdf_nbuf_t sojourn_buf;
3782
3783 union dp_rx_desc_list_elem_t *free_list_head;
3784 union dp_rx_desc_list_elem_t *free_list_tail;
3785 /* Cached peer_id from htt_peer_details_tlv */
3786 uint16_t fw_stats_peer_id;
3787
3788 /* qdf_event for fw_peer_stats */
3789 qdf_event_t fw_peer_stats_event;
3790
3791 /* qdf_event for fw_stats */
3792 qdf_event_t fw_stats_event;
3793
3794 /* qdf_event for fw_obss_stats */
3795 qdf_event_t fw_obss_stats_event;
3796
3797 /* To check if request is already sent for obss stats */
3798 bool pending_fw_obss_stats_response;
3799
3800 /* User configured max number of tx buffers */
3801 uint32_t num_tx_allowed;
3802
3803 /*
3804 * User configured max num of tx buffers excluding the
3805 * number of buffers reserved for handling special frames
3806 */
3807 uint32_t num_reg_tx_allowed;
3808
3809 /* User configured max number of tx buffers for the special frames */
3810 uint32_t num_tx_spl_allowed;
3811
3812 /* unique cookie required for peer session */
3813 uint32_t next_peer_cookie;
3814
3815 /*
3816 * Run time enabled when the first protocol tag is added,
3817 * run time disabled when the last protocol tag is deleted
3818 */
3819 bool is_rx_protocol_tagging_enabled;
3820
3821 #ifdef WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG
3822 /*
3823 * The protocol type is used as array index to save
3824 * user provided tag info
3825 */
3826 struct rx_protocol_tag_map rx_proto_tag_map[RX_PROTOCOL_TAG_MAX];
3827
3828 #ifdef WLAN_SUPPORT_RX_TAG_STATISTICS
3829 /*
3830 * Track msdus received from each reo ring separately to avoid
3831  * simultaneous writes from different cores
3832 */
3833 struct rx_protocol_tag_stats
3834 reo_proto_tag_stats[MAX_REO_DEST_RINGS][RX_PROTOCOL_TAG_MAX];
3835 /* Track msdus received from exception ring separately */
3836 struct rx_protocol_tag_stats
3837 rx_err_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3838 struct rx_protocol_tag_stats
3839 mon_proto_tag_stats[RX_PROTOCOL_TAG_MAX];
3840 #endif /* WLAN_SUPPORT_RX_TAG_STATISTICS */
3841 #endif /* WLAN_SUPPORT_RX_PROTOCOL_TYPE_TAG */
3842
3843 #ifdef WLAN_SUPPORT_RX_FLOW_TAG
3844 /**
3845 * Pointer to DP Flow FST at SOC level if
3846 * is_rx_flow_search_table_per_pdev is true
3847 */
3848 struct dp_rx_fst *rx_fst;
3849 #endif /* WLAN_SUPPORT_RX_FLOW_TAG */
3850
3851 #ifdef FEATURE_TSO_STATS
3852 /* TSO Id to index into TSO packet information */
3853 qdf_atomic_t tso_idx;
3854 #endif /* FEATURE_TSO_STATS */
3855
3856 #ifdef WLAN_SUPPORT_DATA_STALL
3857 data_stall_detect_cb data_stall_detect_callback;
3858 #endif /* WLAN_SUPPORT_DATA_STALL */
3859
3860 /* flag to indicate whether LRO hash command has been sent to FW */
3861 uint8_t is_lro_hash_configured;
3862
3863 #ifdef HTT_STATS_DEBUGFS_SUPPORT
3864 /* HTT stats debugfs params */
3865 struct pdev_htt_stats_dbgfs_cfg *dbgfs_cfg;
3866 #endif
3867 struct {
3868 qdf_work_t work;
3869 qdf_workqueue_t *work_queue;
3870 uint32_t seq_num;
3871 uint8_t queue_depth;
3872 qdf_spinlock_t list_lock;
3873
3874 TAILQ_HEAD(, dp_soc_srngs_state) list;
3875 } bkp_stats;
3876 #ifdef WIFI_MONITOR_SUPPORT
3877 struct dp_mon_pdev *monitor_pdev;
3878 #endif
3879 #ifdef WLAN_FEATURE_11BE_MLO
3880 struct dp_mlo_sync_timestamp timestamp;
3881 #endif
3882 /* Is isolation mode enabled */
3883 bool isolation;
3884 #ifdef WLAN_FEATURE_MARK_FIRST_WAKEUP_PACKET
3885 uint8_t is_first_wakeup_packet;
3886 #endif
3887 #ifdef CONNECTIVITY_PKTLOG
3888 /* packetdump callback functions */
3889 ol_txrx_pktdump_cb dp_tx_packetdump_cb;
3890 ol_txrx_pktdump_cb dp_rx_packetdump_cb;
3891 #endif
3892
3893 /* Firmware Stats for TLV received from Firmware */
3894 uint64_t fw_stats_tlv_bitmap_rcvd;
3895
3896 /* For Checking Pending Firmware Response */
3897 bool pending_fw_stats_response;
3898 };
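
/*
 * Illustrative sketch only (referenced from the dscp_tid_map comment in
 * struct dp_pdev above): looking up the TID for a frame from the per-pdev
 * DSCP-to-TID map, given the map id in use and the frame's DSCP value.
 * dp_example_dscp_to_tid is a hypothetical helper, not a driver API.
 */
static inline uint8_t
dp_example_dscp_to_tid(struct dp_pdev *pdev, uint8_t map_id, uint8_t dscp)
{
	/* Clamp indices to the table bounds before indexing */
	if (map_id >= DP_MAX_TID_MAPS || dscp >= DSCP_TID_MAP_MAX)
		return 0;

	return pdev->dscp_tid_map[map_id][dscp];
}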
3899
3900 struct dp_peer;
3901
3902 #ifdef DP_RX_UDP_OVER_PEER_ROAM
3903 #define WLAN_ROAM_PEER_AUTH_STATUS_NONE 0x0
3904 /*
3905 * This macro is equivalent to macro ROAM_AUTH_STATUS_AUTHENTICATED used
3906 * in connection mgr
3907 */
3908 #define WLAN_ROAM_PEER_AUTH_STATUS_AUTHENTICATED 0x2
3909 #endif
3910
3911 #ifdef WLAN_FEATURE_TX_LATENCY_STATS
3912 /**
3913 * struct dp_tx_latency_config - configuration for per-link transmit latency
3914 * statistics
3915 * @enabled: the feature is enabled or not
3916 * @report: async report is enabled or not
3917  * @granularity: granularity (in microseconds) of the distribution for the types
3918 */
3919 struct dp_tx_latency_config {
3920 qdf_atomic_t enabled;
3921 qdf_atomic_t report;
3922 qdf_atomic_t granularity[CDP_TX_LATENCY_TYPE_MAX];
3923 };
3924
3925 /**
3926 * struct dp_tx_latency_stats - transmit latency distribution for a type
3927 * @latency_accum: accumulated latencies
3928 * @msdus_accum: accumulated number of msdus
3929 * @distribution: distribution of latencies
3930 */
3931 struct dp_tx_latency_stats {
3932 qdf_atomic_t latency_accum;
3933 qdf_atomic_t msdus_accum;
3934 qdf_atomic_t distribution[CDP_TX_LATENCY_DISTR_LV_MAX];
3935 };
3936
3937 /**
3938 * struct dp_tx_latency - transmit latency statistics for remote link peer
3939 * @cur_idx: current row index of the 2D stats array
3940  * @stats: two-dimensional array to store the transmit latency statistics.
3941  * One row, indicated by cur_idx, stores the stats of the current cycle;
3942  * the other holds the stats of the last cycle.
3943 */
3944 struct dp_tx_latency {
3945 uint8_t cur_idx;
3946 struct dp_tx_latency_stats stats[2][CDP_TX_LATENCY_TYPE_MAX];
3947 };
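
/*
 * Illustrative sketch only: the stats[2][...] array in struct dp_tx_latency
 * is double buffered. The row selected by cur_idx accumulates the current
 * cycle while the other row holds the last completed cycle; starting a new
 * cycle simply flips cur_idx. The dp_example_* names are hypothetical.
 */
static inline struct dp_tx_latency_stats *
dp_example_tx_latency_cur_row(struct dp_tx_latency *tx_latency)
{
	/* Row being updated for the ongoing cycle */
	return tx_latency->stats[tx_latency->cur_idx];
}

static inline struct dp_tx_latency_stats *
dp_example_tx_latency_last_row(struct dp_tx_latency *tx_latency)
{
	/* Row holding the previously completed cycle */
	return tx_latency->stats[1 - tx_latency->cur_idx];
}

static inline void
dp_example_tx_latency_switch_cycle(struct dp_tx_latency *tx_latency)
{
	/* Flip the double buffer so the old row becomes the "last" cycle */
	tx_latency->cur_idx = 1 - tx_latency->cur_idx;
}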
3948 #endif
3949
3950 /**
3951 * struct dp_vdev_stats - vdev stats structure for dp vdev
3952 * @tx_i: ingress tx stats, contains legacy and MLO ingress tx stats
3953 * @rx_i: ingress rx stats
3954 * @tx: cdp tx stats
3955 * @rx: cdp rx stats
3956 * @tso_stats: tso stats
3957 * @tid_tx_stats: tid tx stats
3958 */
3959 struct dp_vdev_stats {
3960 struct cdp_tx_ingress_stats tx_i[DP_INGRESS_STATS_MAX_SIZE];
3961 struct cdp_rx_ingress_stats rx_i;
3962 struct cdp_tx_stats tx;
3963 struct cdp_rx_stats rx;
3964 struct cdp_tso_stats tso_stats;
3965 #ifdef HW_TX_DELAY_STATS_ENABLE
3966 struct cdp_tid_tx_stats tid_tx_stats[CDP_MAX_TX_COMP_RINGS]
3967 [CDP_MAX_DATA_TIDS];
3968 #endif
3969 };
3970
3971 /* VDEV structure for data path state */
3972 struct dp_vdev {
3973 /* OS device abstraction */
3974 qdf_device_t osdev;
3975
3976 /* physical device that is the parent of this virtual device */
3977 struct dp_pdev *pdev;
3978
3979 /* VDEV operating mode */
3980 enum wlan_op_mode opmode;
3981
3982 /* VDEV subtype */
3983 enum wlan_op_subtype subtype;
3984
3985 /* Tx encapsulation type for this VAP */
3986 enum htt_cmn_pkt_type tx_encap_type;
3987
3988 /* Rx Decapsulation type for this VAP */
3989 enum htt_cmn_pkt_type rx_decap_type;
3990
3991 /* WDS enabled */
3992 bool wds_enabled;
3993
3994 /* MEC enabled */
3995 bool mec_enabled;
3996
3997 #ifdef QCA_SUPPORT_WDS_EXTENDED
3998 bool wds_ext_enabled;
3999 bool drop_tx_mcast;
4000 #endif /* QCA_SUPPORT_WDS_EXTENDED */
4001 bool drop_3addr_mcast;
4002 #ifdef WLAN_VENDOR_SPECIFIC_BAR_UPDATE
4003 bool skip_bar_update;
4004 unsigned long skip_bar_update_last_ts;
4005 #endif
4006 /* WDS Aging timer period */
4007 uint32_t wds_aging_timer_val;
4008
4009 /* NAWDS enabled */
4010 bool nawds_enabled;
4011
4012 /* Multicast enhancement enabled */
4013 uint8_t mcast_enhancement_en;
4014
4015 /* IGMP multicast enhancement enabled */
4016 uint8_t igmp_mcast_enhanc_en;
4017
4018 /* vdev_id - ID used to specify a particular vdev to the target */
4019 uint8_t vdev_id;
4020
4021 /* Default HTT meta data for this VDEV */
4022 /* TBD: check alignment constraints */
4023 uint16_t htt_tcl_metadata;
4024
4025 /* vdev lmac_id */
4026 uint8_t lmac_id;
4027
4028 /* vdev bank_id */
4029 uint8_t bank_id;
4030
4031 /* Mesh mode vdev */
4032 uint32_t mesh_vdev;
4033
4034 /* Mesh mode rx filter setting */
4035 uint32_t mesh_rx_filter;
4036
4037 /* DSCP-TID mapping table ID */
4038 uint8_t dscp_tid_map_id;
4039
4040 /* Address search type to be set in TX descriptor */
4041 uint8_t search_type;
4042
4043 /*
4044 * Flag to indicate if s/w tid classification should be
4045 * skipped
4046 */
4047 uint8_t skip_sw_tid_classification;
4048
4049 /* Flag to enable peer authorization */
4050 uint8_t peer_authorize;
4051
4052 /* AST hash value for BSS peer in HW valid for STA VAP */
4053 uint16_t bss_ast_hash;
4054
4055 /* AST hash index for BSS peer in HW valid for STA VAP */
4056 uint16_t bss_ast_idx;
4057
4058 bool multipass_en;
4059
4060 /* Address search flags to be configured in HAL descriptor */
4061 uint8_t hal_desc_addr_search_flags;
4062
4063 /* Handle to the OS shim SW's virtual device */
4064 ol_osif_vdev_handle osif_vdev;
4065
4066 /* MAC address */
4067 union dp_align_mac_addr mac_addr;
4068
4069 #ifdef WLAN_FEATURE_11BE_MLO
4070 /* MLO MAC address corresponding to vdev */
4071 union dp_align_mac_addr mld_mac_addr;
4072 #if defined(WLAN_MLO_MULTI_CHIP) && defined(WLAN_MCAST_MLO)
4073 uint8_t mlo_vdev:1,
4074 is_bridge_vdev:1,
4075 reserved_1:6;
4076 #endif
4077 #endif
4078
4079 /* node in the pdev's list of vdevs */
4080 TAILQ_ENTRY(dp_vdev) vdev_list_elem;
4081
4082 /* dp_peer list */
4083 TAILQ_HEAD(, dp_peer) peer_list;
4084 /* to protect peer_list */
4085 DP_MUTEX_TYPE peer_list_lock;
4086
4087 /* RX call back function to flush GRO packets*/
4088 ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
4089 /* default RX call back function called by dp */
4090 ol_txrx_rx_fp osif_rx;
4091 #ifdef QCA_SUPPORT_EAPOL_OVER_CONTROL_PORT
4092 /* callback to receive eapol frames */
4093 ol_txrx_rx_fp osif_rx_eapol;
4094 #endif
4095 /* callback to deliver rx frames to the OS */
4096 ol_txrx_rx_fp osif_rx_stack;
4097 /* Callback to handle rx fisa frames */
4098 ol_txrx_fisa_rx_fp osif_fisa_rx;
4099 ol_txrx_fisa_flush_fp osif_fisa_flush;
4100
4101 /* call back function to flush out queued rx packets*/
4102 ol_txrx_rx_flush_fp osif_rx_flush;
4103 ol_txrx_rsim_rx_decap_fp osif_rsim_rx_decap;
4104 ol_txrx_get_key_fp osif_get_key;
4105 ol_txrx_tx_free_ext_fp osif_tx_free_ext;
4106
4107 #ifdef notyet
4108 /* callback to check if the msdu is a WAI (WAPI) frame */
4109 ol_rx_check_wai_fp osif_check_wai;
4110 #endif
4111
4112 /* proxy arp function */
4113 ol_txrx_proxy_arp_fp osif_proxy_arp;
4114
4115 ol_txrx_mcast_me_fp me_convert;
4116
4117 /* completion function used by this vdev*/
4118 ol_txrx_completion_fp tx_comp;
4119
4120 ol_txrx_get_tsf_time get_tsf_time;
4121
4122 /* callback to classify critical packets */
4123 ol_txrx_classify_critical_pkt_fp tx_classify_critical_pkt_cb;
4124
4125 /* delete notifier to DP component */
4126 ol_txrx_vdev_delete_cb vdev_del_notify;
4127
4128 /* deferred vdev deletion state */
4129 struct {
4130 /* VDEV delete pending */
4131 int pending;
4132 /*
4133 * callback and a context argument to provide a
4134 * notification for when the vdev is deleted.
4135 */
4136 ol_txrx_vdev_delete_cb callback;
4137 void *context;
4138 } delete;
4139
4140 /* tx data delivery notification callback function */
4141 struct {
4142 ol_txrx_data_tx_cb func;
4143 void *ctxt;
4144 } tx_non_std_data_callback;
4145
4146
4147 /* safe mode control to bypass the encryption and decryption process */
4148 uint32_t safemode;
4149
4150 /* rx filter related */
4151 uint32_t drop_unenc;
4152 #ifdef notyet
4153 privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
4154 uint32_t filters_num;
4155 #endif
4156 /* TDLS Link status */
4157 bool tdls_link_connected;
4158 bool is_tdls_frame;
4159
4160 /* per vdev rx nbuf queue */
4161 qdf_nbuf_queue_t rxq;
4162
4163 uint8_t tx_ring_id;
4164 struct dp_tx_desc_pool_s *tx_desc;
4165 struct dp_tx_ext_desc_pool_s *tx_ext_desc;
4166
4167 /* Capture timestamp of previous tx packet enqueued */
4168 uint64_t prev_tx_enq_tstamp;
4169
4170 /* Capture timestamp of previous rx packet delivered */
4171 uint64_t prev_rx_deliver_tstamp;
4172
4173 /* VDEV Stats */
4174 struct dp_vdev_stats stats;
4175
4176 /* Is this a proxySTA VAP */
4177 uint8_t proxysta_vdev : 1, /* Is this a proxySTA VAP */
4178 wrap_vdev : 1, /* Is this a QWRAP AP VAP */
4179 isolation_vdev : 1, /* Is this an isolation VAP */
4180 reserved : 5; /* Reserved */
4181
4182 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
4183 struct dp_tx_desc_pool_s *pool;
4184 #endif
4185 /* AP BRIDGE enabled */
4186 bool ap_bridge_enabled;
4187
4188 enum cdp_sec_type sec_type;
4189
4190 /* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
4191 bool raw_mode_war;
4192
4193
4194 /* 8021p PCP-TID mapping table ID */
4195 uint8_t tidmap_tbl_id;
4196
4197 /* 8021p PCP-TID map values */
4198 uint8_t pcp_tid_map[PCP_TID_MAP_MAX];
4199
4200 /* TIDmap priority */
4201 uint8_t tidmap_prty;
4202
4203 #ifdef QCA_MULTIPASS_SUPPORT
4204 uint16_t *iv_vlan_map;
4205
4206 /* dp_peer special list */
4207 TAILQ_HEAD(, dp_txrx_peer) mpass_peer_list;
4208 DP_MUTEX_TYPE mpass_peer_mutex;
4209 #endif
4210 /* Extended data path handle */
4211 struct cdp_ext_vdev *vdev_dp_ext_handle;
4212 #ifdef VDEV_PEER_PROTOCOL_COUNT
4213 /*
4214  * Rx-Ingress and Tx-Egress are handled in the lower level DP layer,
4215  * while Rx-Egress and Tx-Ingress are handled in the OSIF layer for DP.
4216  * Hence:
4217  * Rx-Egress and Tx-Ingress mask definitions are in the OSIF layer;
4218  * Rx-Ingress and Tx-Egress mask definitions are here below.
4219  */
4220 #define VDEV_PEER_PROTOCOL_RX_INGRESS_MASK 1
4221 #define VDEV_PEER_PROTOCOL_TX_INGRESS_MASK 2
4222 #define VDEV_PEER_PROTOCOL_RX_EGRESS_MASK 4
4223 #define VDEV_PEER_PROTOCOL_TX_EGRESS_MASK 8
4224 bool peer_protocol_count_track;
4225 int peer_protocol_count_dropmask;
4226 #endif
4227 /* callback to collect connectivity stats */
4228 ol_txrx_stats_rx_fp stats_cb;
4229 uint32_t num_peers;
4230 /* entry to inactive_list*/
4231 TAILQ_ENTRY(dp_vdev) inactive_list_elem;
4232
4233 #ifdef WLAN_SUPPORT_RX_FISA
4234 /**
4235 * Params used for controlling the fisa aggregation dynamically
4236 */
4237 uint8_t fisa_disallowed[MAX_REO_DEST_RINGS];
4238 uint8_t fisa_force_flushed[MAX_REO_DEST_RINGS];
4239 #endif
4240 /*
4241 * Refcount for VDEV currently incremented when
4242 * peer is created for VDEV
4243 */
4244 qdf_atomic_t ref_cnt;
4245 qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
4246 uint8_t num_latency_critical_conn;
4247 #ifdef WLAN_SUPPORT_MESH_LATENCY
4248 uint8_t peer_tid_latency_enabled;
4249 /* tid latency configuration parameters */
4250 struct {
4251 uint32_t service_interval;
4252 uint32_t burst_size;
4253 uint8_t latency_tid;
4254 } mesh_tid_latency_config;
4255 #endif
4256 #ifdef WIFI_MONITOR_SUPPORT
4257 struct dp_mon_vdev *monitor_vdev;
4258 #endif
4259 #if defined(WLAN_FEATURE_TSF_AUTO_REPORT) || defined(WLAN_CONFIG_TX_DELAY)
4260 /* Delta between TQM clock and TSF clock */
4261 uint32_t delta_tsf;
4262 #endif
4263 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
4264 /* Indicate if uplink delay report is enabled or not */
4265 qdf_atomic_t ul_delay_report;
4266 /* accumulative delay for every TX completion */
4267 qdf_atomic_t ul_delay_accum;
4268 /* accumulative number of packets delay has accumulated */
4269 qdf_atomic_t ul_pkts_accum;
4270 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */
4271
4272 /* vdev_stats_id - ID used for stats collection by FW from HW*/
4273 uint8_t vdev_stats_id;
4274 #ifdef HW_TX_DELAY_STATS_ENABLE
4275 /* hw tx delay stats enable */
4276 uint8_t hw_tx_delay_stats_enabled;
4277 #endif
4278 #ifdef DP_RX_UDP_OVER_PEER_ROAM
4279 uint32_t roaming_peer_status;
4280 union dp_align_mac_addr roaming_peer_mac;
4281 #endif
4282 #ifdef DP_TRAFFIC_END_INDICATION
4283 /* per vdev feature enable/disable status */
4284 bool traffic_end_ind_en;
4285 /* per vdev nbuf queue for traffic end indication packets */
4286 qdf_nbuf_queue_t end_ind_pkt_q;
4287 #endif
4288 #ifdef FEATURE_DIRECT_LINK
4289 /* Flag to indicate if to_fw should be set for tx pkts on this vdev */
4290 bool to_fw;
4291 #endif
4292 /* QDF VDEV operating mode */
4293 enum QDF_OPMODE qdf_opmode;
4294
4295 #ifdef WLAN_TX_PKT_CAPTURE_ENH
4296 /* TX capture feature to override return buffer manager */
4297 bool is_override_rbm_id;
4298 /* Return buffer manager ID */
4299 uint8_t rbm_id;
4300 #endif
4301
4302 #ifdef WLAN_FEATURE_TX_LATENCY_STATS
4303 /* configuration for tx latency stats */
4304 struct dp_tx_latency_config tx_latency_cfg;
4305 #endif
4306 };
4307
4308 enum {
4309 dp_sec_mcast = 0,
4310 dp_sec_ucast
4311 };
4312
4313 #ifdef WDS_VENDOR_EXTENSION
4314 typedef struct {
4315 uint8_t wds_tx_mcast_4addr:1,
4316 wds_tx_ucast_4addr:1,
4317 wds_rx_filter:1, /* enforce rx filter */
4318 wds_rx_ucast_4addr:1, /* when set, accept 4addr unicast frames */
4319 wds_rx_mcast_4addr:1; /* when set, accept 4addr multicast frames */
4320
4321 } dp_ecm_policy;
4322 #endif
4323
4324 /**
4325 * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
4326  * @cached_bufq: nbuf list to enqueue rx packets
4327  * @bufq_lock: spinlock for nbuf list access
4328 * @thresh: maximum threshold for number of rx buff to enqueue
4329 * @entries: number of entries
4330 * @dropped: number of packets dropped
4331 */
4332 struct dp_peer_cached_bufq {
4333 qdf_list_t cached_bufq;
4334 qdf_spinlock_t bufq_lock;
4335 uint32_t thresh;
4336 uint32_t entries;
4337 uint32_t dropped;
4338 };
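
/*
 * Illustrative sketch only: enqueueing an rx buffer node into a
 * dp_peer_cached_bufq while honouring the configured threshold. The
 * qdf_list_node_t parameter and the dp_example_* name are assumptions for
 * the example; the real driver defines its own bufq element layout.
 */
static inline bool
dp_example_cached_bufq_enqueue(struct dp_peer_cached_bufq *bufq_info,
			       qdf_list_node_t *node)
{
	bool queued = false;

	qdf_spin_lock_bh(&bufq_info->bufq_lock);
	if (bufq_info->entries < bufq_info->thresh) {
		/* Room below the threshold: queue and count the entry */
		qdf_list_insert_back(&bufq_info->cached_bufq, node);
		bufq_info->entries++;
		queued = true;
	} else {
		/* Threshold reached: account the drop instead of queueing */
		bufq_info->dropped++;
	}
	qdf_spin_unlock_bh(&bufq_info->bufq_lock);

	return queued;
}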
4339
4340 /**
4341 * enum dp_peer_ast_flowq
4342 * @DP_PEER_AST_FLOWQ_HI_PRIO: Hi Priority flow queue
4343 * @DP_PEER_AST_FLOWQ_LOW_PRIO: Low priority flow queue
4344 * @DP_PEER_AST_FLOWQ_UDP: flow queue type is UDP
4345 * @DP_PEER_AST_FLOWQ_NON_UDP: flow queue type is Non UDP
4346 * @DP_PEER_AST_FLOWQ_MAX: max value
4347 */
4348 enum dp_peer_ast_flowq {
4349 DP_PEER_AST_FLOWQ_HI_PRIO,
4350 DP_PEER_AST_FLOWQ_LOW_PRIO,
4351 DP_PEER_AST_FLOWQ_UDP,
4352 DP_PEER_AST_FLOWQ_NON_UDP,
4353 DP_PEER_AST_FLOWQ_MAX,
4354 };
4355
4356 /**
4357 * struct dp_ast_flow_override_info - ast override info
4358 * @ast_idx: ast indexes in peer map message
4359 * @ast_valid_mask: ast valid mask for each ast index
4360 * @ast_flow_mask: ast flow mask for each ast index
4361 * @tid_valid_low_pri_mask: per tid mask for low priority flow
4362 * @tid_valid_hi_pri_mask: per tid mask for hi priority flow
4363 */
4364 struct dp_ast_flow_override_info {
4365 uint16_t ast_idx[DP_PEER_AST_FLOWQ_MAX];
4366 uint8_t ast_valid_mask;
4367 uint8_t ast_flow_mask[DP_PEER_AST_FLOWQ_MAX];
4368 uint8_t tid_valid_low_pri_mask;
4369 uint8_t tid_valid_hi_pri_mask;
4370 };
4371
4372 #if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
4373 /**
4374 * struct dp_peer_ext_evt_info - peer extended event info
4375 * @peer_id: peer_id from firmware
4376 * @vdev_id: vdev ID
4377 * @link_id: Link ID
4378 * @link_id_valid: link_id_valid
4379 * @peer_mac_addr: mac address of the peer
4380 */
4381 struct dp_peer_ext_evt_info {
4382 uint16_t peer_id;
4383 uint8_t vdev_id;
4384 uint8_t link_id;
4385 bool link_id_valid;
4386 uint8_t *peer_mac_addr;
4387 };
4388 #endif
4389
4390 /**
4391 * struct dp_peer_ast_params - ast parameters for a msdu flow-queue
4392 * @ast_idx: ast index populated by FW
4393 * @is_valid: ast flow valid mask
4394 * @valid_tid_mask: per tid mask for this ast index
4395 * @flowQ: flow queue id associated with this ast index
4396 */
4397 struct dp_peer_ast_params {
4398 uint16_t ast_idx;
4399 uint8_t is_valid;
4400 uint8_t valid_tid_mask;
4401 uint8_t flowQ;
4402 };
4403
4404 #define DP_MLO_FLOW_INFO_MAX 3
4405
4406 /**
4407 * struct dp_mlo_flow_override_info - Flow override info
4408 * @ast_idx: Primary TCL AST Index
4409 * @ast_idx_valid: Is AST index valid
4410 * @chip_id: CHIP ID
4411 * @tidmask: tidmask
4412 * @cache_set_num: Cache set number
4413 */
4414 struct dp_mlo_flow_override_info {
4415 uint16_t ast_idx;
4416 uint8_t ast_idx_valid;
4417 uint8_t chip_id;
4418 uint8_t tidmask;
4419 uint8_t cache_set_num;
4420 };
4421
4422 /**
4423 * struct dp_mlo_link_info - Link info
4424 * @peer_chip_id: Peer Chip ID
4425 * @vdev_id: Vdev ID
4426 */
4427 struct dp_mlo_link_info {
4428 uint8_t peer_chip_id;
4429 uint8_t vdev_id;
4430 };
4431
4432 #ifdef WLAN_SUPPORT_MSCS
4433 /* MSCS procedure based macros */
4434 #define IEEE80211_MSCS_MAX_ELEM_SIZE 5
4435 #define IEEE80211_TCLAS_MASK_CLA_TYPE_4 4
4436 /**
4437 * struct dp_peer_mscs_parameter - MSCS database obtained from
4438 * MSCS Request and Response in the control path. This data is used
4439 * by the AP to find out what priority to set based on the tuple
4440 * classification during packet processing.
4441 * @user_priority_bitmap: User priority bitmap obtained during
4442 * handshake
4443 * @user_priority_limit: User priority limit obtained during
4444 * handshake
4445 * @classifier_mask: params to be compared during processing
4446 */
4447 struct dp_peer_mscs_parameter {
4448 uint8_t user_priority_bitmap;
4449 uint8_t user_priority_limit;
4450 uint8_t classifier_mask;
4451 };
4452 #endif
4453
4454 #ifdef QCA_SUPPORT_WDS_EXTENDED
4455 #define WDS_EXT_PEER_INIT_BIT 0
4456
4457 /**
4458 * struct dp_wds_ext_peer - wds ext peer structure
4459  * This is used when the WDS extended feature is enabled
4460  * at both compile time and run time. It is created
4461  * when the first 4-address frame is received from the
4462  * WDS backhaul.
4463 * @osif_peer: Handle to the OS shim SW's virtual device
4464 * @init: wds ext netdev state
4465 */
4466 struct dp_wds_ext_peer {
4467 ol_osif_peer_handle osif_peer;
4468 unsigned long init;
4469 };
4470 #endif /* QCA_SUPPORT_WDS_EXTENDED */
4471
4472 #ifdef WLAN_SUPPORT_MESH_LATENCY
4473 /* Advanced Mesh latency feature based macros */
4474
4475 /**
4476 * struct dp_peer_mesh_latency_parameter - Mesh latency related
4477 * parameters. This data is updated per peer per TID based on
4478 * the flow tuple classification in external rule database
4479 * during packet processing.
4480 * @service_interval_dl: Service interval associated with TID in DL
4481 * @burst_size_dl: Burst size additive over multiple flows in DL
4482 * @service_interval_ul: Service interval associated with TID in UL
4483 * @burst_size_ul: Burst size additive over multiple flows in UL
4484 * @ac: custom ac derived from service interval
4485 * @msduq: MSDU queue number within TID
4486 */
4487 struct dp_peer_mesh_latency_parameter {
4488 uint32_t service_interval_dl;
4489 uint32_t burst_size_dl;
4490 uint32_t service_interval_ul;
4491 uint32_t burst_size_ul;
4492 uint8_t ac;
4493 uint8_t msduq;
4494 };
4495 #endif
4496
4497 #ifdef WLAN_FEATURE_11BE_MLO
4498 /* Max number of links for MLO connection */
4499 #define DP_MAX_MLO_LINKS 4
4500
4501 /**
4502 * struct dp_peer_link_info - link peer information for MLO
4503 * @mac_addr: Mac address
4504 * @vdev_id: Vdev ID for current link peer
4505 * @is_valid: flag for link peer info valid or not
4506 * @chip_id: chip id
4507 * @is_bridge_peer: flag to indicate if peer is bridge peer
4508 */
4509 struct dp_peer_link_info {
4510 union dp_align_mac_addr mac_addr;
4511 uint8_t vdev_id;
4512 uint8_t is_valid;
4513 uint8_t chip_id;
4514 uint8_t is_bridge_peer;
4515 };
4516
4517 /**
4518 * struct dp_mld_link_peers - this structure is used to get link peers
4519 * pointer from mld peer
4520 * @link_peers: link peers pointer array
4521 * @num_links: number of link peers fetched
4522 */
4523 struct dp_mld_link_peers {
4524 struct dp_peer *link_peers[DP_MAX_MLO_LINKS];
4525 uint8_t num_links;
4526 };
4527 #else
4528 #define DP_MAX_MLO_LINKS 0
4529 #endif
4530
4531 typedef void *dp_txrx_ref_handle;
4532
4533 /**
4534 * struct dp_peer_per_pkt_tx_stats- Peer Tx stats updated in per pkt
4535 * Tx completion path
4536 * @ucast: Unicast Packet Count
4537 * @mcast: Multicast Packet Count
4538 * @bcast: Broadcast Packet Count
4539 * @nawds_mcast: NAWDS Multicast Packet Count
4540 * @tx_success: Successful Tx Packets
4541 * @nawds_mcast_drop: NAWDS Multicast Drop Count
4542 * @ofdma: Total Packets as ofdma
4543 * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
4544 * @amsdu_cnt: Number of MSDUs part of AMSDU
4545 * @dropped: Dropped packet statistics
4546 * @dropped.fw_rem: Discarded by firmware
4547 * @dropped.fw_rem_notx: firmware_discard_untransmitted
4548 * @dropped.fw_rem_tx: firmware_discard_transmitted
4549 * @dropped.age_out: aged out in mpdu/msdu queues
4550 * @dropped.fw_reason1: discarded by firmware reason 1
4551 * @dropped.fw_reason2: discarded by firmware reason 2
4552  * @dropped.fw_reason3: discarded by firmware reason 3
 * @dropped.fw_rem_queue_disable: discarded by firmware due to queue disable
4553  * @dropped.fw_rem_no_match: dropped due to fw no match command
4554  * @dropped.drop_threshold: dropped due to HW threshold
4555  * @dropped.drop_link_desc_na: dropped due to resource not available in HW
4556 * @dropped.invalid_drop: Invalid msdu drop
4557 * @dropped.mcast_vdev_drop: MCAST drop configured for VDEV in HW
4558 * @dropped.invalid_rr: Invalid TQM release reason
4559 * @failed_retry_count: packets failed due to retry above 802.11 retry limit
4560 * @retry_count: packets successfully send after one or more retry
4561 * @multiple_retry_count: packets successfully sent after more than one retry
4562 * @no_ack_count: no ack pkt count for different protocols
4563 * @tx_success_twt: Successful Tx Packets in TWT session
4564 * @last_tx_ts: last timestamp in jiffies when tx comp occurred
4565 * @avg_sojourn_msdu: Avg sojourn msdu stat
4566 * @protocol_trace_cnt: per-peer protocol counter
4567 * @release_src_not_tqm: Counter to keep track of release source is not TQM
4568 * in TX completion status processing
4569 * @inval_link_id_pkt_cnt: Counter to capture Invalid Link Id
4570 */
4571 struct dp_peer_per_pkt_tx_stats {
4572 struct cdp_pkt_info ucast;
4573 struct cdp_pkt_info mcast;
4574 struct cdp_pkt_info bcast;
4575 struct cdp_pkt_info nawds_mcast;
4576 struct cdp_pkt_info tx_success;
4577 uint32_t nawds_mcast_drop;
4578 uint32_t ofdma;
4579 uint32_t non_amsdu_cnt;
4580 uint32_t amsdu_cnt;
4581 struct {
4582 struct cdp_pkt_info fw_rem;
4583 uint32_t fw_rem_notx;
4584 uint32_t fw_rem_tx;
4585 uint32_t age_out;
4586 uint32_t fw_reason1;
4587 uint32_t fw_reason2;
4588 uint32_t fw_reason3;
4589 uint32_t fw_rem_queue_disable;
4590 uint32_t fw_rem_no_match;
4591 uint32_t drop_threshold;
4592 uint32_t drop_link_desc_na;
4593 uint32_t invalid_drop;
4594 uint32_t mcast_vdev_drop;
4595 uint32_t invalid_rr;
4596 } dropped;
4597 uint32_t failed_retry_count;
4598 uint32_t retry_count;
4599 uint32_t multiple_retry_count;
4600 uint32_t no_ack_count[QDF_PROTO_SUBTYPE_MAX];
4601 struct cdp_pkt_info tx_success_twt;
4602 unsigned long last_tx_ts;
4603 qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
4604 #ifdef VDEV_PEER_PROTOCOL_COUNT
4605 struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
4606 #endif
4607 uint32_t release_src_not_tqm;
4608 uint32_t inval_link_id_pkt_cnt;
4609 };
4610
4611 /**
4612 * struct dp_peer_extd_tx_stats - Peer Tx stats updated in either
4613 * per pkt Tx completion path when macro QCA_ENHANCED_STATS_SUPPORT is
4614 * disabled or in HTT Tx PPDU completion path when macro is enabled
4615 * @stbc: Packets in STBC
4616 * @ldpc: Packets in LDPC
4617 * @retries: Packet retries
4618 * @pkt_type: pkt count for different .11 modes
4619 * @wme_ac_type: Wireless Multimedia type Count
4620  * @excess_retries_per_ac: Excess retry count per WMM access category
4621 * @ampdu_cnt: completion of aggregation
4622 * @non_ampdu_cnt: tx completion not aggregated
4623 * @num_ppdu_cookie_valid: no. of valid ppdu cookies rcvd from FW
4624 * @tx_ppdus: ppdus in tx
4625 * @tx_mpdus_success: mpdus successful in tx
4626 * @tx_mpdus_tried: mpdus tried in tx
4627 * @tx_rate: Tx Rate in kbps
4628 * @last_tx_rate: Last tx rate for unicast packets
4629 * @last_tx_rate_mcs: Tx rate mcs for unicast packets
4630 * @mcast_last_tx_rate: Last tx rate for multicast packets
4631 * @mcast_last_tx_rate_mcs: Last tx rate mcs for multicast
4632 * @rnd_avg_tx_rate: Rounded average tx rate
4633 * @avg_tx_rate: Average TX rate
4634 * @tx_ratecode: Tx rate code of last frame
4635 * @pream_punct_cnt: Preamble Punctured count
4636 * @sgi_count: SGI count
4637 * @nss: Packet count for different num_spatial_stream values
4638 * @bw: Packet Count for different bandwidths
4639 * @ru_start: RU start index
4640 * @ru_tones: RU tones size
4641 * @ru_loc: pkt info for RU location 26/ 52/ 106/ 242/ 484 counter
4642 * @transmit_type: pkt info for tx transmit type
4643 * @mu_group_id: mumimo mu group id
4644 * @last_ack_rssi: RSSI of last acked packet
4645 * @nss_info: NSS 1,2, ...8
4646 * @mcs_info: MCS index
4647 * @bw_info: Bandwidth
4648 * <enum 0 bw_20_MHz>
4649 * <enum 1 bw_40_MHz>
4650 * <enum 2 bw_80_MHz>
4651 * <enum 3 bw_160_MHz>
4652 * @gi_info: <enum 0 0_8_us_sgi > Legacy normal GI
4653 * <enum 1 0_4_us_sgi > Legacy short GI
4654 * <enum 2 1_6_us_sgi > HE related GI
4655 * <enum 3 3_2_us_sgi > HE
4656 * @preamble_info: preamble
4657 * @tx_ucast_total: total ucast count
4658 * @tx_ucast_success: total ucast success count
4659  * @retries_mpdu: number of mpdus successfully transmitted after retries
4660 * @mpdu_success_with_retries: mpdu retry count in case of successful tx
4661 * @su_be_ppdu_cnt: SU Tx packet count for 11BE
4662 * @mu_be_ppdu_cnt: MU Tx packet count for 11BE
4663 * @punc_bw: MSDU count for punctured bw
4664 * @rts_success: RTS success count
4665 * @rts_failure: RTS failure count
4666 * @bar_cnt: Block ACK Request frame count
4667 * @ndpa_cnt: NDP announcement frame count
4668 * @rssi_chain: rssi chain
4669 * @wme_ac_type_bytes: Wireless Multimedia bytes Count
4670 */
4671 struct dp_peer_extd_tx_stats {
4672 uint32_t stbc;
4673 uint32_t ldpc;
4674 uint32_t retries;
4675 struct cdp_pkt_type pkt_type[DOT11_MAX];
4676 uint32_t wme_ac_type[WME_AC_MAX];
4677 uint32_t excess_retries_per_ac[WME_AC_MAX];
4678 uint32_t ampdu_cnt;
4679 uint32_t non_ampdu_cnt;
4680 uint32_t num_ppdu_cookie_valid;
4681 uint32_t tx_ppdus;
4682 uint32_t tx_mpdus_success;
4683 uint32_t tx_mpdus_tried;
4684
4685 uint32_t tx_rate;
4686 uint32_t last_tx_rate;
4687 uint32_t last_tx_rate_mcs;
4688 uint32_t mcast_last_tx_rate;
4689 uint32_t mcast_last_tx_rate_mcs;
4690 uint64_t rnd_avg_tx_rate;
4691 uint64_t avg_tx_rate;
4692 uint16_t tx_ratecode;
4693
4694 uint32_t sgi_count[MAX_GI];
4695 uint32_t pream_punct_cnt;
4696 uint32_t nss[SS_COUNT];
4697 uint32_t bw[MAX_BW];
4698 uint32_t ru_start;
4699 uint32_t ru_tones;
4700 struct cdp_tx_pkt_info ru_loc[MAX_RU_LOCATIONS];
4701
4702 struct cdp_tx_pkt_info transmit_type[MAX_TRANSMIT_TYPES];
4703 uint32_t mu_group_id[MAX_MU_GROUP_ID];
4704
4705 uint32_t last_ack_rssi;
4706
4707 uint32_t nss_info:4,
4708 mcs_info:4,
4709 bw_info:4,
4710 gi_info:4,
4711 preamble_info:4;
4712
4713 uint32_t retries_mpdu;
4714 uint32_t mpdu_success_with_retries;
4715 struct cdp_pkt_info tx_ucast_total;
4716 struct cdp_pkt_info tx_ucast_success;
4717 #ifdef WLAN_FEATURE_11BE
4718 struct cdp_pkt_type su_be_ppdu_cnt;
4719 struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4720 uint32_t punc_bw[MAX_PUNCTURED_MODE];
4721 #endif
4722 uint32_t rts_success;
4723 uint32_t rts_failure;
4724 uint32_t bar_cnt;
4725 uint32_t ndpa_cnt;
4726 int32_t rssi_chain[CDP_RSSI_CHAIN_LEN];
4727 uint64_t wme_ac_type_bytes[WME_AC_MAX];
4728 };
4729
4730 /**
4731 * struct dp_peer_per_pkt_rx_stats - Peer Rx stats updated in per pkt Rx path
4732 * @rcvd_reo: Packets received on the reo ring
4733 * @rx_lmac: Packets received on each lmac
4734 * @unicast: Total unicast packets
4735 * @multicast: Total multicast packets
4736 * @bcast: Broadcast Packet Count
4737  * @raw: Raw Packets received
4738 * @nawds_mcast_drop: Total NAWDS multicast packets dropped
4739 * @mec_drop: Total MEC packets dropped
4740 * @ppeds_drop: Total DS packets dropped
4741 * @last_rx_ts: last timestamp in jiffies when RX happened
4742 * @intra_bss: Intra BSS statistics
4743 * @intra_bss.pkts: Intra BSS packets received
4744 * @intra_bss.fail: Intra BSS packets failed
4745  * @intra_bss.mdns_no_fwd: Intra BSS MDNS packets not forwarded
4746 * @err: error counters
4747 * @err.mic_err: Rx MIC errors CCMP
4748 * @err.decrypt_err: Rx Decryption Errors CRC
4749  * @err.fcserr: Rx FCS check failures
4750 * @err.pn_err: pn check failed
4751 * @err.oor_err: Rx OOR errors
4752 * @err.jump_2k_err: 2k jump errors
4753 * @err.rxdma_wifi_parse_err: rxdma wifi parse errors
4754 * @non_amsdu_cnt: Number of MSDUs with no MSDU level aggregation
4755 * @amsdu_cnt: Number of MSDUs part of AMSDU
4756 * @rx_retries: retries of packet in rx
4757 * @multipass_rx_pkt_drop: Dropped multipass rx pkt
4758 * @peer_unauth_rx_pkt_drop: Unauth rx packet drops
4759 * @policy_check_drop: policy check drops
4760 * @to_stack_twt: Total packets sent up the stack in TWT session
4761 * @rx_success: Total RX success count
4762 * @protocol_trace_cnt: per-peer protocol counters
4763 * @mcast_3addr_drop:
4764 * @rx_total: total rx count
4765 * @inval_link_id_pkt_cnt: Counter to capture Invalid Link Id
4766 */
4767 struct dp_peer_per_pkt_rx_stats {
4768 struct cdp_pkt_info rcvd_reo[CDP_MAX_RX_RINGS];
4769 struct cdp_pkt_info rx_lmac[CDP_MAX_LMACS];
4770 struct cdp_pkt_info unicast;
4771 struct cdp_pkt_info multicast;
4772 struct cdp_pkt_info bcast;
4773 struct cdp_pkt_info raw;
4774 uint32_t nawds_mcast_drop;
4775 struct cdp_pkt_info mec_drop;
4776 struct cdp_pkt_info ppeds_drop;
4777 unsigned long last_rx_ts;
4778 struct {
4779 struct cdp_pkt_info pkts;
4780 struct cdp_pkt_info fail;
4781 uint32_t mdns_no_fwd;
4782 } intra_bss;
4783 struct {
4784 uint32_t mic_err;
4785 uint32_t decrypt_err;
4786 uint32_t fcserr;
4787 uint32_t pn_err;
4788 uint32_t oor_err;
4789 uint32_t jump_2k_err;
4790 uint32_t rxdma_wifi_parse_err;
4791 } err;
4792 uint32_t non_amsdu_cnt;
4793 uint32_t amsdu_cnt;
4794 uint32_t rx_retries;
4795 uint32_t multipass_rx_pkt_drop;
4796 uint32_t peer_unauth_rx_pkt_drop;
4797 uint32_t policy_check_drop;
4798 struct cdp_pkt_info to_stack_twt;
4799 struct cdp_pkt_info rx_success;
4800 #ifdef VDEV_PEER_PROTOCOL_COUNT
4801 struct protocol_trace_count protocol_trace_cnt[CDP_TRACE_MAX];
4802 #endif
4803 uint32_t mcast_3addr_drop;
4804 #ifdef IPA_OFFLOAD
4805 struct cdp_pkt_info rx_total;
4806 #endif
4807 uint32_t inval_link_id_pkt_cnt;
4808 };
4809
4810 /**
4811 * struct dp_peer_extd_rx_stats - Peer Rx stats updated in either
4812 * per pkt Rx path when macro QCA_ENHANCED_STATS_SUPPORT is disabled or in
4813  * Rx monitor path when the macro is enabled
4814 * @pkt_type: pkt counter for different .11 modes
4815 * @wme_ac_type: Wireless Multimedia type Count
4816 * @mpdu_cnt_fcs_ok: SU Rx success mpdu count
4817 * @mpdu_cnt_fcs_err: SU Rx fail mpdu count
4818 * @non_ampdu_cnt: Number of MSDUs with no MPDU level aggregation
4819  * @ampdu_cnt: Number of MSDUs part of AMPDU
4820 * @rx_mpdus: mpdu in rx
4821 * @rx_ppdus: ppdu in rx
4822 * @su_ax_ppdu_cnt: SU Rx packet count for .11ax
4823 * @rx_mu: Rx MU stats
4824 * @reception_type: Reception type of packets
4825 * @ppdu_cnt: PPDU packet count in reception type
4826 * @sgi_count: sgi count
4827  * @nss: packet count in spatial streams
4828 * @ppdu_nss: PPDU packet count in spatial streams
4829 * @bw: Packet Count in different bandwidths
4830 * @rx_mpdu_cnt: rx mpdu count per MCS rate
4831 * @rx_rate: Rx rate
4832 * @last_rx_rate: Previous rx rate
4833 * @rnd_avg_rx_rate: Rounded average rx rate
4834 * @avg_rx_rate: Average Rx rate
4835 * @rx_ratecode: Rx rate code of last frame
4836 * @avg_snr: Average snr
4837 * @rx_snr_measured_time: Time at which snr is measured
4838 * @snr: SNR of received signal
4839 * @last_snr: Previous snr
4840 * @nss_info: NSS 1,2, ...8
4841 * @mcs_info: MCS index
4842 * @bw_info: Bandwidth
4843 * <enum 0 bw_20_MHz>
4844 * <enum 1 bw_40_MHz>
4845 * <enum 2 bw_80_MHz>
4846 * <enum 3 bw_160_MHz>
4847 * @gi_info: <enum 0 0_8_us_sgi > Legacy normal GI
4848 * <enum 1 0_4_us_sgi > Legacy short GI
4849 * <enum 2 1_6_us_sgi > HE related GI
4850 * <enum 3 3_2_us_sgi > HE
4851 * @preamble_info: preamble
4852 * @mpdu_retry_cnt: retries of mpdu in rx
4853 * @su_be_ppdu_cnt: SU Rx packet count for BE
4854 * @mu_be_ppdu_cnt: MU rx packet count for BE
4855 * @punc_bw: MSDU count for punctured bw
4856 * @bar_cnt: Block ACK Request frame count
4857 * @ndpa_cnt: NDP announcement frame count
4858 * @wme_ac_type_bytes: Wireless Multimedia type Bytes Count
4859 */
4860 struct dp_peer_extd_rx_stats {
4861 struct cdp_pkt_type pkt_type[DOT11_MAX];
4862 uint32_t wme_ac_type[WME_AC_MAX];
4863 uint32_t mpdu_cnt_fcs_ok;
4864 uint32_t mpdu_cnt_fcs_err;
4865 uint32_t non_ampdu_cnt;
4866 uint32_t ampdu_cnt;
4867 uint32_t rx_mpdus;
4868 uint32_t rx_ppdus;
4869
4870 struct cdp_pkt_type su_ax_ppdu_cnt;
4871 struct cdp_rx_mu rx_mu[TXRX_TYPE_MU_MAX];
4872 uint32_t reception_type[MAX_RECEPTION_TYPES];
4873 uint32_t ppdu_cnt[MAX_RECEPTION_TYPES];
4874
4875 uint32_t sgi_count[MAX_GI];
4876 uint32_t nss[SS_COUNT];
4877 uint32_t ppdu_nss[SS_COUNT];
4878 uint32_t bw[MAX_BW];
4879 uint32_t rx_mpdu_cnt[MAX_MCS];
4880
4881 uint32_t rx_rate;
4882 uint32_t last_rx_rate;
4883 uint32_t rnd_avg_rx_rate;
4884 uint32_t avg_rx_rate;
4885 uint32_t rx_ratecode;
4886
4887 uint32_t avg_snr;
4888 unsigned long rx_snr_measured_time;
4889 uint8_t snr;
4890 uint8_t last_snr;
4891
4892 uint32_t nss_info:4,
4893 mcs_info:4,
4894 bw_info:4,
4895 gi_info:4,
4896 preamble_info:4;
4897
4898 uint32_t mpdu_retry_cnt;
4899 #ifdef WLAN_FEATURE_11BE
4900 struct cdp_pkt_type su_be_ppdu_cnt;
4901 struct cdp_pkt_type mu_be_ppdu_cnt[TXRX_TYPE_MU_MAX];
4902 uint32_t punc_bw[MAX_PUNCTURED_MODE];
4903 #endif
4904 uint32_t bar_cnt;
4905 uint32_t ndpa_cnt;
4906 uint64_t wme_ac_type_bytes[WME_AC_MAX];
4907 };
4908
4909 /**
4910 * struct dp_peer_per_pkt_stats - Per pkt stats for peer
4911 * @tx: Per pkt Tx stats
4912 * @rx: Per pkt Rx stats
4913 */
4914 struct dp_peer_per_pkt_stats {
4915 struct dp_peer_per_pkt_tx_stats tx;
4916 struct dp_peer_per_pkt_rx_stats rx;
4917 };
4918
4919 /**
4920 * struct dp_peer_extd_stats - Stats from extended path for peer
4921 * @tx: Extended path tx stats
4922 * @rx: Extended path rx stats
4923 */
4924 struct dp_peer_extd_stats {
4925 struct dp_peer_extd_tx_stats tx;
4926 struct dp_peer_extd_rx_stats rx;
4927 };
4928
4929 /**
4930 * struct dp_peer_stats - Peer stats
4931 * @per_pkt_stats: Per packet path stats
4932 * @extd_stats: Extended path stats
4933 * @tx_latency: transmit latency stats
4934 */
4935 struct dp_peer_stats {
4936 struct dp_peer_per_pkt_stats per_pkt_stats;
4937 #ifndef QCA_ENHANCED_STATS_SUPPORT
4938 struct dp_peer_extd_stats extd_stats;
4939 #endif
4940 #ifdef WLAN_FEATURE_TX_LATENCY_STATS
4941 struct dp_tx_latency tx_latency;
4942 #endif
4943 };
4944
4945 /**
4946 * struct dp_local_link_id_peer_map - Mapping table entry for link peer mac
4947 * address to local_link_id
4948  * @in_use: set if this entry holds a valid mapping between local_link_id
4949 * and the link peer mac address.
4950 * @local_link_id: local_link_id assigned to the link peer
4951 * @mac_addr: link peer mac address
4952 */
4953 struct dp_local_link_id_peer_map {
4954 uint8_t in_use;
4955 uint8_t local_link_id;
4956 union dp_align_mac_addr mac_addr;
4957 };
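
/*
 * Illustrative sketch only: resolving the local_link_id assigned to a link
 * peer mac address from a dp_local_link_id_peer_map table. The caller-
 * supplied table size, the 0 "not found" return value and the dp_example_*
 * name are assumptions for the example, not driver behaviour.
 */
static inline uint8_t
dp_example_lookup_local_link_id(struct dp_local_link_id_peer_map *map,
				uint8_t num_entries,
				union dp_align_mac_addr *mac_addr)
{
	uint8_t i;

	for (i = 0; i < num_entries; i++) {
		if (map[i].in_use &&
		    !qdf_mem_cmp(&map[i].mac_addr, mac_addr,
				 QDF_MAC_ADDR_SIZE))
			return map[i].local_link_id;
	}

	/* No mapping found for this link peer mac address */
	return 0;
}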
4958
4959 /**
4960  * struct dp_txrx_peer - DP txrx_peer structure used in per pkt path
4961 * @vdev: VDEV to which this peer is associated
4962 * @peer_id: peer ID for this peer
4963 * @authorize: Set when authorized
4964 * @in_twt: in TWT session
4965 * @hw_txrx_stats_en: Indicate HW offload vdev stats
4966  * @is_mld_peer: flag to indicate an MLD peer
4967 * @tx_failed: Total Tx failure
4968 * @comp_pkt: Pkt Info for which completions were received
4969 * @to_stack: Total packets sent up the stack
4970 * @delay_stats: Peer delay stats
4971 * @jitter_stats: Peer jitter stats
4972 * @security: Security credentials
4973 * @nawds_enabled: NAWDS flag
4974 * @bss_peer: set for bss peer
4975 * @isolation: enable peer isolation for this peer
4976 * @wds_enabled: WDS peer
4977  * @wds_ecm: WDS vendor extension ECM policy
4978  * @flush_in_progress: flag to indicate cached rx packet flush is in progress
4979  * @bufq_info: cached rx packet queue info
4980  * @mpass_peer_list_elem: node in the special peer list element
4981  * @vlan_id: vlan id for key
4982  * @wds_ext: WDS extended peer info
4983  * @osif_rx: rx callback for the WDS extended peer netdev
4984  * @rx_tid: per-TID rx defrag context
4985  * @sawf_stats: per-peer SAWF stats
4986 * @bw: bandwidth of peer connection
4987 * @mpdu_retry_threshold: MPDU retry threshold to increment tx bad count
4988 * @band: Link ID to band mapping
4989 * @ll_id_peer_map: Mapping table for link peer mac address to local_link_id
4990 * @ll_band: Local link id band mapping
4991 * @stats_arr_size: peer stats array size
4992 * @stats: Peer link and mld statistics
4993 */
4994 struct dp_txrx_peer {
4995 struct dp_vdev *vdev;
4996 uint16_t peer_id;
4997 uint8_t authorize:1,
4998 in_twt:1,
4999 hw_txrx_stats_en:1,
5000 is_mld_peer:1;
5001 uint32_t tx_failed;
5002 struct cdp_pkt_info comp_pkt;
5003 struct cdp_pkt_info to_stack;
5004
5005 struct dp_peer_delay_stats *delay_stats;
5006
5007 struct cdp_peer_tid_stats *jitter_stats;
5008
5009 struct {
5010 enum cdp_sec_type sec_type;
5011 u_int32_t michael_key[2]; /* relevant for TKIP */
5012 } security[2]; /* 0 -> multicast, 1 -> unicast */
5013
5014 uint16_t nawds_enabled:1,
5015 bss_peer:1,
5016 isolation:1,
5017 wds_enabled:1;
5018 #ifdef WDS_VENDOR_EXTENSION
5019 dp_ecm_policy wds_ecm;
5020 #endif
5021 #ifdef PEER_CACHE_RX_PKTS
5022 qdf_atomic_t flush_in_progress;
5023 struct dp_peer_cached_bufq bufq_info;
5024 #endif
5025 #ifdef QCA_MULTIPASS_SUPPORT
5026 TAILQ_ENTRY(dp_txrx_peer) mpass_peer_list_elem;
5027 uint16_t vlan_id;
5028 #endif
5029 #ifdef QCA_SUPPORT_WDS_EXTENDED
5030 struct dp_wds_ext_peer wds_ext;
5031 ol_txrx_rx_fp osif_rx;
5032 #endif
5033 struct dp_rx_tid_defrag rx_tid[DP_MAX_TIDS];
5034 #ifdef CONFIG_SAWF
5035 struct dp_peer_sawf_stats *sawf_stats;
5036 #endif
5037 #ifdef DP_PEER_EXTENDED_API
5038 enum cdp_peer_bw bw;
5039 uint8_t mpdu_retry_threshold;
5040 #endif
5041 #if defined WLAN_FEATURE_11BE_MLO && defined DP_MLO_LINK_STATS_SUPPORT
5042 /* Link ID to band mapping (1 MLD + DP_MAX_MLO_LINKS) */
5043 uint8_t band[DP_MAX_MLO_LINKS + 1];
5044
5045 struct dp_local_link_id_peer_map ll_id_peer_map[DP_MAX_MLO_LINKS];
5046 uint8_t ll_band[DP_MAX_MLO_LINKS + 1];
5047 #endif
5048 uint8_t stats_arr_size;
5049
5050 /* dp_peer_stats should be the last member in the structure */
5051 struct dp_peer_stats stats[];
5052 };
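
/*
 * Illustrative sketch only: struct dp_txrx_peer ends in a flexible array of
 * dp_peer_stats, so an allocation has to account for stats_arr_size extra
 * elements. The dp_example_* name and the use of qdf_mem_malloc here are
 * assumptions for the example, not the driver's actual allocation path.
 */
static inline struct dp_txrx_peer *
dp_example_txrx_peer_alloc(uint8_t stats_arr_size)
{
	struct dp_txrx_peer *txrx_peer;

	/* Base structure plus one dp_peer_stats entry per array element */
	txrx_peer = qdf_mem_malloc(sizeof(*txrx_peer) +
				   stats_arr_size *
				   sizeof(struct dp_peer_stats));
	if (!txrx_peer)
		return NULL;

	txrx_peer->stats_arr_size = stats_arr_size;

	return txrx_peer;
}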
5053
5054 /* Peer structure for data path state */
5055 struct dp_peer {
5056 struct dp_txrx_peer *txrx_peer;
5057 #ifdef WIFI_MONITOR_SUPPORT
5058 struct dp_mon_peer *monitor_peer;
5059 #endif
5060 /* peer ID for this peer */
5061 uint16_t peer_id;
5062
5063 /* VDEV to which this peer is associated */
5064 struct dp_vdev *vdev;
5065
5066 struct dp_ast_entry *self_ast_entry;
5067
5068 qdf_atomic_t ref_cnt;
5069
5070 union dp_align_mac_addr mac_addr;
5071
5072 /* node in the vdev's list of peers */
5073 TAILQ_ENTRY(dp_peer) peer_list_elem;
5074 /* node in the hash table bin's list of peers */
5075 TAILQ_ENTRY(dp_peer) hash_list_elem;
5076
5077 /* TID structures pointer */
5078 struct dp_rx_tid *rx_tid;
5079
5080 /* TBD: No transmit TID state required? */
5081
5082 struct {
5083 enum cdp_sec_type sec_type;
5084 u_int32_t michael_key[2]; /* relevant for TKIP */
5085 } security[2]; /* 0 -> multicast, 1 -> unicast */
5086
5087 /* NAWDS Flag and Bss Peer bit */
5088 uint16_t bss_peer:1, /* set for bss peer */
5089 authorize:1, /* Set when authorized */
5090 valid:1, /* valid bit */
5091 delete_in_progress:1, /* Indicate kickout sent */
5092 sta_self_peer:1, /* Indicate STA self peer */
5093 is_tdls_peer:1; /* Indicate TDLS peer */
5094
5095 /* MCL specific peer local id */
5096 uint16_t local_id;
5097 enum ol_txrx_peer_state state;
5098
5099 #ifdef WLAN_FEATURE_11BE_MLO
5100 uint8_t first_link:1, /* first link peer for MLO */
5101 primary_link:1; /* primary link for MLO */
5102 #endif
5103
5104 qdf_spinlock_t peer_info_lock;
5105
5106 /* Peer calibrated stats */
5107 struct cdp_calibr_stats stats;
5108
5109 TAILQ_HEAD(, dp_ast_entry) ast_entry_list;
5110 /* TBD */
5111
5112 /* Active Block ack sessions */
5113 uint16_t active_ba_session_cnt;
5114
5115 /* Current HW buffersize setting */
5116 uint16_t hw_buffer_size;
5117
5118 /*
5119 * Flag to check if sessions with 256 buffersize
5120 * should be terminated.
5121 */
5122 uint8_t kill_256_sessions;
5123 qdf_atomic_t is_default_route_set;
5124
5125 #ifdef QCA_PEER_MULTIQ_SUPPORT
5126 struct dp_peer_ast_params peer_ast_flowq_idx[DP_PEER_AST_FLOWQ_MAX];
5127 #endif
5128 /* entry to inactive_list*/
5129 TAILQ_ENTRY(dp_peer) inactive_list_elem;
5130
5131 qdf_atomic_t mod_refs[DP_MOD_ID_MAX];
5132
5133 uint8_t peer_state;
5134 qdf_spinlock_t peer_state_lock;
5135 #ifdef WLAN_SUPPORT_MSCS
5136 struct dp_peer_mscs_parameter mscs_ipv4_parameter, mscs_ipv6_parameter;
5137 bool mscs_active;
5138 #endif
5139 #ifdef WLAN_SUPPORT_MESH_LATENCY
5140 struct dp_peer_mesh_latency_parameter mesh_latency_params[DP_MAX_TIDS];
5141 #endif
5142 #ifdef WLAN_FEATURE_11BE_MLO
5143 /* peer type */
5144 enum cdp_peer_type peer_type;
5145 /*---------for link peer---------*/
5146 struct dp_peer *mld_peer;
5147
5148 /*Link ID of link peer*/
5149 uint8_t link_id;
5150 bool link_id_valid;
5151 uint8_t local_link_id;
5152
5153 /*---------for mld peer----------*/
5154 struct dp_peer_link_info link_peers[DP_MAX_MLO_LINKS];
5155 uint8_t num_links;
5156 DP_MUTEX_TYPE link_peers_info_lock;
5157 #ifdef WLAN_FEATURE_11BE_MLO_3_LINK_TX
5158 uint32_t flow_cnt[CDP_DATA_TID_MAX];
5159 #endif
5160 #endif
5161 #ifdef CONFIG_SAWF_DEF_QUEUES
5162 struct dp_peer_sawf *sawf;
5163 #endif
5164 /* AST hash index for peer in HW */
5165 uint16_t ast_idx;
5166
5167 /* AST hash value for peer in HW */
5168 uint16_t ast_hash;
5169
5170 /* Peer Frequency */
5171 uint32_t freq;
5172 };
5173
5174 /**
5175 * struct dp_invalid_peer_msg - Invalid peer message
5176 * @nbuf: data buffer
5177 * @wh: 802.11 header
5178 * @vdev_id: id of vdev
5179 */
5180 struct dp_invalid_peer_msg {
5181 qdf_nbuf_t nbuf;
5182 struct ieee80211_frame *wh;
5183 uint8_t vdev_id;
5184 };
5185
5186 /**
5187 * struct dp_tx_me_buf_t - ME buffer
5188 * @next: pointer to next buffer
5189 * @data: Destination Mac address
5190 * @paddr_macbuf: physical address for dest_mac
5191 */
5192 struct dp_tx_me_buf_t {
5193 /* Note: ME buf pool initialization logic expects next pointer to
5194  * be the first element. Don't add anything before next. */
5195 struct dp_tx_me_buf_t *next;
5196 uint8_t data[QDF_MAC_ADDR_SIZE];
5197 qdf_dma_addr_t paddr_macbuf;
5198 };

#if defined(WLAN_SUPPORT_RX_FLOW_TAG) || defined(WLAN_SUPPORT_RX_FISA)
struct hal_rx_fst;
#endif /* WLAN_SUPPORT_RX_FLOW_TAG || WLAN_SUPPORT_RX_FISA */

#ifdef WLAN_SUPPORT_RX_FLOW_TAG
struct dp_rx_fse {
	/* HAL Rx Flow Search Entry which matches HW definition */
	void *hal_rx_fse;
	/* Toeplitz hash value */
	uint32_t flow_hash;
	/* Flow index, equivalent to hash value truncated to FST size */
	uint32_t flow_id;
	/* Stats tracking for this flow */
	struct cdp_flow_stats stats;
	/* Flag indicating whether flow is IPv4 address tuple */
	uint8_t is_ipv4_addr_entry;
	/* Flag indicating whether flow is valid */
	uint8_t is_valid;
};

struct dp_rx_fst {
	/* Software (DP) FST */
	uint8_t *base;
	/* Pointer to HAL FST */
	struct hal_rx_fst *hal_rx_fst;
	/* Base physical address of HAL RX HW FST */
	uint64_t hal_rx_fst_base_paddr;
	/* Maximum number of flows FSE supports */
	uint16_t max_entries;
	/* Num entries in flow table */
	uint16_t num_entries;
	/* SKID Length */
	uint16_t max_skid_length;
	/* Hash mask to obtain legitimate hash entry */
	uint32_t hash_mask;
	/* Timer for bundling of flows */
	qdf_timer_t cache_invalidate_timer;
	/*
	 * Flag which tracks whether cache update
	 * is needed on timer expiry
	 */
	qdf_atomic_t is_cache_update_pending;
	/* Flag to indicate completion of FSE setup in HW/FW */
	bool fse_setup_done;
	/* Last ring id used to add a flow */
	uint8_t ring_id;
};
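
/*
 * Illustrative sketch (hypothetical helper, not the actual FST code): the
 * fields above imply the usual hash-and-skid placement scheme, where the
 * slot index is the Toeplitz hash truncated by @hash_mask and collisions
 * probe up to @max_skid_length consecutive slots. dp_rx_fst_slot() is an
 * assumed name used only to make that relationship concrete.
 *
 *	static inline struct dp_rx_fse *
 *	dp_rx_fst_slot(struct dp_rx_fst *fst, uint32_t flow_hash)
 *	{
 *		struct dp_rx_fse *tbl = (struct dp_rx_fse *)fst->base;
 *		uint32_t idx = flow_hash & fst->hash_mask;
 *		uint32_t probe;
 *
 *		for (probe = 0; probe < fst->max_skid_length; probe++) {
 *			uint32_t slot = (idx + probe) & fst->hash_mask;
 *
 *			if (!tbl[slot].is_valid)
 *				return &tbl[slot];
 *		}
 *		return NULL;
 *	}
 */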

#define DP_RX_GET_SW_FT_ENTRY_SIZE sizeof(struct dp_rx_fse)

#endif /* WLAN_SUPPORT_RX_FLOW_TAG */

#ifdef WLAN_FEATURE_STATS_EXT
/**
 * struct dp_req_rx_hw_stats_t - RX peer HW stats query structure
 * @pending_tid_stats_cnt: pending tid stats count which waits for REO status
 * @is_query_timeout: flag to indicate whether the stats query timed out
 */
struct dp_req_rx_hw_stats_t {
	qdf_atomic_t pending_tid_stats_cnt;
	bool is_query_timeout;
};
#endif
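
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): one
 * plausible use of dp_req_rx_hw_stats_t is to seed the pending counter
 * with the number of TIDs being queried and have each REO status callback
 * decrement it, while @is_query_timeout tells a late callback that the
 * waiter has already given up. Names and the wait mechanism below are
 * assumptions.
 *
 *	static inline void
 *	dp_rx_hw_stats_req_init(struct dp_req_rx_hw_stats_t *req,
 *				uint32_t num_tids)
 *	{
 *		qdf_atomic_set(&req->pending_tid_stats_cnt, num_tids);
 *		req->is_query_timeout = false;
 *	}
 */
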
/* soc level structure to declare arch specific ops for DP */

#ifndef WLAN_SOFTUMAC_SUPPORT
/**
 * dp_hw_link_desc_pool_banks_free() - Free h/w link desc pool banks
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Return: none
 */
void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc, uint32_t mac_id);

/**
 * dp_hw_link_desc_pool_banks_alloc() - Allocate h/w link desc pool banks
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Allocates memory pages for link descriptors; the page size is 4K for
 * MCL and 2MB for WIN. If the mac_id is invalid, link descriptor pages
 * are allocated for regular RX/TX, and if a valid mac_id is passed, link
 * descriptors are allocated for RX monitor mode.
 *
 * Return: QDF_STATUS_SUCCESS: Success
 *	   QDF_STATUS_E_FAILURE: Failure
 */
QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
					    uint32_t mac_id);
#else
static inline void dp_hw_link_desc_pool_banks_free(struct dp_soc *soc,
						   uint32_t mac_id)
{
}

static inline QDF_STATUS dp_hw_link_desc_pool_banks_alloc(struct dp_soc *soc,
							   uint32_t mac_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * dp_link_desc_ring_replenish() - Replenish hw link desc rings
 * @soc: DP SOC handle
 * @mac_id: mac id
 *
 * Return: None
 */
void dp_link_desc_ring_replenish(struct dp_soc *soc, uint32_t mac_id);
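
/*
 * Illustrative sketch (assumed call order, not a mandated sequence): the
 * link descriptor helpers above are typically paired as allocate once,
 * replenish the rings from the allocated banks, and free on teardown.
 * Error handling is trimmed and mac_id stands for whatever id the caller
 * is operating on.
 *
 *	if (dp_hw_link_desc_pool_banks_alloc(soc, mac_id) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	dp_link_desc_ring_replenish(soc, mac_id);
 *	...
 *	dp_hw_link_desc_pool_banks_free(soc, mac_id);
 */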

#ifdef WLAN_FEATURE_RX_PREALLOC_BUFFER_POOL
void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc);
#else
static inline void dp_rx_refill_buff_pool_enqueue(struct dp_soc *soc) {}
#endif

/**
 * dp_srng_alloc() - Allocate memory for SRNG
 * @soc: Data path soc handle
 * @srng: SRNG pointer
 * @ring_type: Ring Type
 * @num_entries: Number of entries
 * @cached: cached flag variable
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_srng_alloc(struct dp_soc *soc, struct dp_srng *srng,
			 int ring_type, uint32_t num_entries,
			 bool cached);

/**
 * dp_srng_free() - Free SRNG memory
 * @soc: Data path soc handle
 * @srng: SRNG pointer
 *
 * Return: None
 */
void dp_srng_free(struct dp_soc *soc, struct dp_srng *srng);

/**
 * dp_srng_init() - Initialize SRNG
 * @soc: Data path soc handle
 * @srng: SRNG pointer
 * @ring_type: Ring Type
 * @ring_num: Ring number
 * @mac_id: mac_id
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
			int ring_type, int ring_num, int mac_id);

/**
 * dp_srng_init_idx() - Initialize SRNG at a given ring index
 * @soc: Data path soc handle
 * @srng: SRNG pointer
 * @ring_type: Ring Type
 * @ring_num: Ring number
 * @mac_id: mac_id
 * @idx: ring index
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_srng_init_idx(struct dp_soc *soc, struct dp_srng *srng,
			    int ring_type, int ring_num, int mac_id,
			    uint32_t idx);

/**
 * dp_srng_deinit() - Internal function to deinit SRNG rings used by data path
 * @soc: DP SOC handle
 * @srng: source ring structure
 * @ring_type: type of ring
 * @ring_num: ring number
 *
 * Return: None
 */
void dp_srng_deinit(struct dp_soc *soc, struct dp_srng *srng,
		    int ring_type, int ring_num);
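
/*
 * Illustrative sketch (assumed lifecycle, error handling trimmed): the
 * SRNG helpers above pair up as alloc/free for memory and init/deinit for
 * ring setup, so a ring is normally brought up and torn down in this
 * order. ring_type, ring_num, num_entries and mac_id are placeholders for
 * the caller's values.
 *
 *	struct dp_srng srng = {0};
 *
 *	if (dp_srng_alloc(soc, &srng, ring_type, num_entries, false) !=
 *	    QDF_STATUS_SUCCESS)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	if (dp_srng_init(soc, &srng, ring_type, ring_num, mac_id) !=
 *	    QDF_STATUS_SUCCESS) {
 *		dp_srng_free(soc, &srng);
 *		return QDF_STATUS_E_FAILURE;
 *	}
 *	...
 *	dp_srng_deinit(soc, &srng, ring_type, ring_num);
 *	dp_srng_free(soc, &srng);
 */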

void dp_print_peer_txrx_stats_be(struct cdp_peer_stats *peer_stats,
				 enum peer_stats_type stats_type);
void dp_print_peer_txrx_stats_li(struct cdp_peer_stats *peer_stats,
				 enum peer_stats_type stats_type);

void dp_print_peer_txrx_stats_rh(struct cdp_peer_stats *peer_stats,
				 enum peer_stats_type stats_type);

/**
 * dp_should_timer_irq_yield() - Decide if the bottom half should yield
 * @soc: DP soc handle
 * @work_done: work done in softirq context
 * @start_time: start time for the softirq
 *
 * Return: enum with yield code
 */
enum timer_yield_status
dp_should_timer_irq_yield(struct dp_soc *soc, uint32_t work_done,
			  uint64_t start_time);
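
/*
 * Illustrative sketch (hypothetical caller): a timer-driven bottom half
 * can poll rings in a loop, accumulate work_done, and consult the helper
 * above to decide when to back off. rings_pending() and poll_one_ring()
 * are placeholders, and DP_TIMER_NO_YIELD is assumed to be the
 * keep-running value of the yield enum.
 *
 *	uint64_t start_time = qdf_get_log_timestamp();
 *	uint32_t work_done = 0;
 *
 *	while (rings_pending(soc)) {
 *		work_done += poll_one_ring(soc);
 *		if (dp_should_timer_irq_yield(soc, work_done, start_time) !=
 *		    DP_TIMER_NO_YIELD)
 *			break;
 *	}
 */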

/**
 * dp_vdev_get_default_reo_hash() - get reo dest ring and hash values for a vdev
 * @vdev: Datapath VDEV handle
 * @reo_dest: pointer to default reo_dest ring for vdev to be populated
 * @hash_based: pointer to hash value (enabled/disabled) to be populated
 *
 * Return: None
 */
void dp_vdev_get_default_reo_hash(struct dp_vdev *vdev,
				  enum cdp_host_reo_dest_ring *reo_dest,
				  bool *hash_based);
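
/*
 * Illustrative sketch (hypothetical caller): both arguments are output
 * parameters, so a typical peer-setup path reads them into locals before
 * applying them.
 *
 *	enum cdp_host_reo_dest_ring reo_dest;
 *	bool hash_based = false;
 *
 *	dp_vdev_get_default_reo_hash(vdev, &reo_dest, &hash_based);
 */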

/**
 * dp_reo_remap_config() - configure reo remap register value based
 *			   on nss configuration.
 * @soc: DP soc handle
 * @remap0: output parameter indicates reo remap 0 register value
 * @remap1: output parameter indicates reo remap 1 register value
 * @remap2: output parameter indicates reo remap 2 register value
 *
 * Based on the offload_radio value, the remap configuration below
 * gets applied:
 * 0 - both Radios handled by host (remap rings 1, 2, 3 & 4)
 * 1 - 1st Radio handled by NSS (remap rings 2, 3 & 4)
 * 2 - 2nd Radio handled by NSS (remap rings 1, 2 & 4)
 * 3 - both Radios handled by NSS (remap not required)
 * 4 - IPA OFFLOAD enabled (remap rings 1, 2 & 3)
 *
 * Return: bool type, true if remap is configured else false.
 */
bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap0,
			 uint32_t *remap1, uint32_t *remap2);
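
/*
 * Illustrative sketch (hypothetical caller): the remap words only matter
 * when the function reports that remapping is needed, so the register
 * write is gated on the return value. The hal_reo_read_write_ctrl_ix()
 * call and its argument order are assumptions about how the values would
 * be programmed.
 *
 *	uint32_t remap0 = 0, remap1 = 0, remap2 = 0;
 *
 *	if (dp_reo_remap_config(soc, &remap0, &remap1, &remap2))
 *		hal_reo_read_write_ctrl_ix(soc->hal_soc, false, &remap0,
 *					   &remap1, &remap2, NULL);
 */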

#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
/**
 * dp_tx_comp_get_prefetched_params_from_hal_desc() - Get prefetched TX desc
 * @soc: DP soc handle
 * @tx_comp_hal_desc: HAL TX Comp Descriptor
 * @r_tx_desc: SW Tx Descriptor retrieved from HAL desc.
 *
 * Return: None
 */
void dp_tx_comp_get_prefetched_params_from_hal_desc(
					struct dp_soc *soc,
					void *tx_comp_hal_desc,
					struct dp_tx_desc_s **r_tx_desc);
#endif
#endif /* _DP_TYPES_H_ */