1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #ifndef __DP_TX_H
20 #define __DP_TX_H
21 
22 #include <qdf_types.h>
23 #include <qdf_nbuf.h>
24 #include "dp_types.h"
25 #ifdef FEATURE_PERPKT_INFO
26 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
27 	defined(QCA_TX_CAPTURE_SUPPORT) || \
28 	defined(QCA_MCOPY_SUPPORT)
29 #include "if_meta_hdr.h"
30 #endif
31 #endif
32 #include "dp_internal.h"
33 #include "hal_tx.h"
34 #include <qdf_tracepoint.h>
35 #ifdef CONFIG_SAWF
36 #include "dp_sawf.h"
37 #endif
38 #include <qdf_pkt_add_timestamp.h>
39 #include "dp_ipa.h"
40 #ifdef IPA_OFFLOAD
41 #include <wlan_ipa_obj_mgmt_api.h>
42 #endif
43 
44 #define DP_INVALID_VDEV_ID 0xFF
45 
46 #define DP_TX_MAX_NUM_FRAGS 6
47 
48 /* invalid peer id for reinject */
49 #define DP_INVALID_PEER 0XFFFE
50 
51 void dp_tx_nawds_handler(struct dp_soc *soc, struct dp_vdev *vdev,
52 			 struct dp_tx_msdu_info_s *msdu_info,
53 			 qdf_nbuf_t nbuf, uint16_t sa_peer_id);
54 int dp_tx_proxy_arp(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
55 /*
56  * DP_TX_DESC_FLAG_FRAG should always be defined as 0x1;
57  * please do not change this flag's definition
58  */
59 #define DP_TX_DESC_FLAG_FRAG		0x1
60 #define DP_TX_DESC_FLAG_TO_FW		0x2
61 #define DP_TX_DESC_FLAG_SIMPLE		0x4
62 #define DP_TX_DESC_FLAG_RAW		0x8
63 #define DP_TX_DESC_FLAG_MESH		0x10
64 #define DP_TX_DESC_FLAG_QUEUED_TX	0x20
65 #define DP_TX_DESC_FLAG_COMPLETED_TX	0x40
66 #define DP_TX_DESC_FLAG_ME		0x80
67 #define DP_TX_DESC_FLAG_TDLS_FRAME	0x100
68 #define DP_TX_DESC_FLAG_ALLOCATED	0x200
69 #define DP_TX_DESC_FLAG_MESH_MODE	0x400
70 #define DP_TX_DESC_FLAG_UNMAP_DONE	0x800
71 #define DP_TX_DESC_FLAG_TX_COMP_ERR	0x1000
72 #define DP_TX_DESC_FLAG_FLUSH		0x2000
73 #define DP_TX_DESC_FLAG_TRAFFIC_END_IND	0x4000
74 #define DP_TX_DESC_FLAG_RMNET		0x8000
75 #define DP_TX_DESC_FLAG_FASTPATH_SIMPLE 0x10000
76 #define DP_TX_DESC_FLAG_PPEDS		0x20000
77 #define DP_TX_DESC_FLAG_FAST		0x40000
78 #define DP_TX_DESC_FLAG_SPECIAL         0x80000
79 
80 #define DP_TX_EXT_DESC_FLAG_METADATA_VALID 0x1
81 
82 #define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
83 do {                                                           \
84 	qdf_nbuf_unmap(soc->osdev, buf, QDF_DMA_TO_DEVICE);  \
85 	qdf_nbuf_free(buf);                                    \
86 } while (0)
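
/*
 * Usage sketch (illustrative only, not part of this header): a typical
 * error path that unmaps and frees a single mapped nbuf in one step.
 * 'enqueue_failed' is a placeholder condition.
 *
 *	if (enqueue_failed)
 *		DP_TX_FREE_SINGLE_BUF(soc, nbuf);
 */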
87 
88 #define OCB_HEADER_VERSION	 1
89 
90 #ifdef TX_PER_PDEV_DESC_POOL
91 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
92 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
93 #else /* QCA_LL_TX_FLOW_CONTROL_V2 */
94 #define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->pdev->pdev_id)
95 #endif /* QCA_LL_TX_FLOW_CONTROL_V2 */
96 	#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
97 #else
98 	#ifdef TX_PER_VDEV_DESC_POOL
99 		#define DP_TX_GET_DESC_POOL_ID(vdev) (vdev->vdev_id)
100 		#define DP_TX_GET_RING_ID(vdev) (vdev->pdev->pdev_id)
101 	#endif /* TX_PER_VDEV_DESC_POOL */
102 #endif /* TX_PER_PDEV_DESC_POOL */
103 #define DP_TX_QUEUE_MASK 0x3
104 
105 #define MAX_CDP_SEC_TYPE 12
106 
107 /* number of dwords for htt_tx_msdu_desc_ext2_t */
108 #define DP_TX_MSDU_INFO_META_DATA_DWORDS 9
109 
110 #define dp_tx_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX, params)
111 #define dp_tx_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX, params)
112 #define dp_tx_err_rl(params...) QDF_TRACE_ERROR_RL(QDF_MODULE_ID_DP_TX, params)
113 #define dp_tx_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX, params)
114 #define dp_tx_info(params...) \
115 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX, ## params)
116 #define dp_tx_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX, params)
117 
118 #define dp_tx_comp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_DP_TX_COMP, params)
119 #define dp_tx_comp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_DP_TX_COMP, params)
120 #define dp_tx_comp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_DP_TX_COMP, params)
121 #define dp_tx_comp_info(params...) \
122 	__QDF_TRACE_FL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
123 #define dp_tx_comp_info_rl(params...) \
124 	__QDF_TRACE_RL(QDF_TRACE_LEVEL_INFO_HIGH, QDF_MODULE_ID_DP_TX_COMP, ## params)
125 #define dp_tx_comp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_DP_TX_COMP, params)
126 
127 #ifndef QCA_HOST_MODE_WIFI_DISABLED
128 
129 /**
130  * struct dp_tx_frag_info_s - TX per-fragment information
131  * @vaddr: hlos virtual address for buffer
132  * @paddr_lo: physical address lower 32bits
133  * @paddr_hi: physical address higher bits
134  * @len: length of the buffer
135  */
136 struct dp_tx_frag_info_s {
137 	uint8_t  *vaddr;
138 	uint32_t paddr_lo;
139 	uint16_t paddr_hi;
140 	uint16_t len;
141 };
142 
143 /**
144  * struct dp_tx_seg_info_s - Segmentation Descriptor
145  * @nbuf: NBUF pointer if segment corresponds to separate nbuf
146  * @frag_cnt: Fragment count in this segment
147  * @total_len: Total length of segment
148  * @frags: per-Fragment information
149  * @next: pointer to next MSDU segment
150  */
151 struct dp_tx_seg_info_s  {
152 	qdf_nbuf_t nbuf;
153 	uint16_t frag_cnt;
154 	uint16_t total_len;
155 	struct dp_tx_frag_info_s frags[DP_TX_MAX_NUM_FRAGS];
156 	struct dp_tx_seg_info_s *next;
157 };
158 
159 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
160 
161 /**
162  * struct dp_tx_sg_info_s - Scatter Gather Descriptor
163  * @num_segs: Number of segments (TSO/ME) in the frame
164  * @total_len: Total length of the frame
165  * @curr_seg: Points to current segment descriptor to be processed. Chain of
166  * 	      descriptors for SG frames/multicast-unicast converted packets.
167  *
168  * Used for SG (802.3 or Raw) frames and Multicast-Unicast converted frames to
169  * carry fragmentation information.
170  * Raw frames are handed over to the driver as an SKB chain with MPDU boundaries
171  * indicated through flags in the SKB CB (first_msdu and last_msdu). This is then
172  * converted into a set of skb sg (nr_frags) structures.
173  */
174 struct dp_tx_sg_info_s {
175 	uint32_t num_segs;
176 	uint32_t total_len;
177 	struct dp_tx_seg_info_s *curr_seg;
178 };
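
/*
 * Illustrative sketch (assumed usage, not a driver API): walking the
 * segment chain carried by an SG descriptor. Each segment holds up to
 * DP_TX_MAX_NUM_FRAGS fragments; 'consume_frag' is a hypothetical helper.
 *
 *	struct dp_tx_seg_info_s *seg = sg_info->curr_seg;
 *	uint16_t i;
 *
 *	while (seg) {
 *		for (i = 0; i < seg->frag_cnt; i++)
 *			consume_frag(&seg->frags[i]);
 *		seg = seg->next;
 *	}
 */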
179 
180 /**
181  * struct dp_tx_queue - Tx queue
182  * @desc_pool_id: Descriptor Pool to be used for the tx queue
183  * @ring_id: TCL descriptor ring ID corresponding to the tx queue
184  *
185  * A Tx queue contains information about the software (descriptor pool)
186  * and hardware (TCL ring id) resources to be used for a particular
187  * transmit queue (obtained from skb_queue_mapping in the case of Linux)
188  */
189 struct dp_tx_queue {
190 	uint8_t desc_pool_id;
191 	uint8_t ring_id;
192 };
193 
194 /**
195  * struct dp_tx_msdu_info_s - MSDU Descriptor
196  * @frm_type: Frame type - Regular/TSO/SG/Multicast enhancement
197  * @tx_queue: Tx queue on which this MSDU should be transmitted
198  * @num_seg: Number of segments (TSO)
199  * @tid: TID (override) that is sent from HLOS
200  * @exception_fw: Duplicate frame to be sent to firmware
201  * @is_tx_sniffer: Indicates if the packet has to be sniffed
202  * @u: union of frame information structs
203  * @u.tso_info: TSO information for TSO frame types
204  * 	     (chain of the TSO segments, number of segments)
205  * @u.sg_info: Scatter Gather information for non-TSO SG frames
206  * @meta_data: Mesh meta header information
207  * @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
208  * @xmit_type: xmit type of packet Link (0)/MLD (1)
209  * @gsn: global sequence for reinjected mcast packets
210  * @vdev_id: vdev_id for reinjected mcast packets
211  * @skip_hp_update: Skip HP update for TSO segments; update only in the last segment
212  * @buf_len: length of the packet payload (RMNET optimization path)
213  * @payload_addr: address of the packet payload (RMNET optimization path)
214  * @driver_ingress_ts: driver ingress timestamp
215  *
216  * This structure holds the complete MSDU information needed to program the
217  * Hardware TCL and MSDU extension descriptors for different frame types
218  *
219  */
220 struct dp_tx_msdu_info_s {
221 	enum dp_tx_frm_type frm_type;
222 	struct dp_tx_queue tx_queue;
223 	uint32_t num_seg;
224 	uint8_t tid;
225 	uint8_t exception_fw;
226 	uint8_t is_tx_sniffer;
227 	union {
228 		struct qdf_tso_info_t tso_info;
229 		struct dp_tx_sg_info_s sg_info;
230 	} u;
231 	uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
232 	uint16_t ppdu_cookie;
233 	uint8_t xmit_type;
234 #if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
235 #ifdef WLAN_MCAST_MLO
236 	uint16_t gsn;
237 	uint8_t vdev_id;
238 #endif
239 #endif
240 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
241 	uint8_t skip_hp_update;
242 #endif
243 #ifdef QCA_DP_TX_RMNET_OPTIMIZATION
244 	uint16_t buf_len;
245 	uint8_t *payload_addr;
246 #endif
247 #ifdef WLAN_FEATURE_TX_LATENCY_STATS
248 	qdf_ktime_t driver_ingress_ts;
249 #endif
250 };
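
/*
 * Illustrative sketch (assumptions flagged inline): minimal msdu_info
 * setup for a regular (non-TSO, non-SG) frame before it is enqueued.
 * The frm_type enumerator and TID constant names are assumed from
 * dp_types.h and the HTT headers.
 *
 *	struct dp_tx_msdu_info_s msdu_info;
 *
 *	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
 *	msdu_info.frm_type = dp_tx_frm_std;
 *	msdu_info.tid = HTT_TX_EXT_TID_INVALID;	// no TID override requested
 *	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
 */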
251 
252 #ifndef QCA_HOST_MODE_WIFI_DISABLED
253 /**
254  * dp_tx_deinit_pair_by_index() - Deinit TX rings based on index
255  * @soc: core txrx context
256  * @index: index of ring to deinit
257  *
258  * Deinit 1 TCL and 1 WBM2SW release ring on an as-needed basis using
259  * index of the respective TCL/WBM2SW release in soc structure.
260  * For example, if the index is 2 then &soc->tcl_data_ring[2]
261  * and &soc->tx_comp_ring[2] will be deinitialized.
262  *
263  * Return: none
264  */
265 void dp_tx_deinit_pair_by_index(struct dp_soc *soc, int index);
266 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
267 
268 /**
269  * dp_tx_comp_process_desc_list() - Tx complete software descriptor handler
270  * @soc: core txrx main context
271  * @comp_head: software descriptor head pointer
272  * @ring_id: ring number
273  *
274  * This function will process a batch of descriptors reaped by dp_tx_comp_handler
275  * and release the software descriptors after processing is complete
276  *
277  * Return: none
278  */
279 void
280 dp_tx_comp_process_desc_list(struct dp_soc *soc,
281 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id);
282 
283 /**
284  * dp_tx_comp_process_desc_list_fast() - Tx complete fast sw descriptor handler
285  * @soc: core txrx main context
286  * @head_desc: software descriptor head pointer
287  * @tail_desc: software descriptor tail pointer
288  * @ring_id: ring number
289  * @fast_desc_count: Total descriptor count in the list
290  *
291  * This function will process a batch of descriptors reaped by dp_tx_comp_handler
292  * and append the list of descriptors to the freelist
293  *
294  * Return: none
295  */
296 void
297 dp_tx_comp_process_desc_list_fast(struct dp_soc *soc,
298 				  struct dp_tx_desc_s *head_desc,
299 				  struct dp_tx_desc_s *tail_desc,
300 				  uint8_t ring_id,
301 				  uint32_t fast_desc_count);
302 
303 /**
304  * dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
305  * @soc: Soc handle
306  * @desc: software Tx descriptor to be processed
307  * @delayed_free: defer freeing of nbuf
308  *
309  * Return: nbuf to be freed later
310  */
311 qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
312 			       bool delayed_free);
313 
314 /**
315  * dp_tx_desc_release() - Release Tx Descriptor
316  * @soc: Soc handle
317  * @tx_desc: Tx Descriptor
318  * @desc_pool_id: Descriptor Pool ID
319  *
320  * Deallocate all resources attached to Tx descriptor and free the Tx
321  * descriptor.
322  *
323  * Return: none
324  */
325 void dp_tx_desc_release(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
326 			uint8_t desc_pool_id);
327 
328 /**
329  * dp_tx_compute_delay() - Compute and fill in all timestamps
330  *				to pass in correct fields
331  * @vdev: vdev handle
332  * @tx_desc: tx descriptor
333  * @tid: tid value
334  * @ring_id: TCL or WBM ring number for transmit path
335  *
336  * Return: none
337  */
338 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
339 			 uint8_t tid, uint8_t ring_id);
340 
341 /**
342  * dp_tx_comp_process_tx_status() - Parse and Dump Tx completion status info
343  * @soc: DP soc handle
344  * @tx_desc: software descriptor head pointer
345  * @ts: Tx completion status
346  * @txrx_peer: txrx peer handle
347  * @ring_id: ring number
348  *
349  * Return: none
350  */
351 void dp_tx_comp_process_tx_status(struct dp_soc *soc,
352 				  struct dp_tx_desc_s *tx_desc,
353 				  struct hal_tx_completion_status *ts,
354 				  struct dp_txrx_peer *txrx_peer,
355 				  uint8_t ring_id);
356 
357 /**
358  * dp_tx_comp_process_desc() - Process tx descriptor and free associated nbuf
359  * @soc: DP Soc handle
360  * @desc: software Tx descriptor
361  * @ts: Tx completion status from HAL/HTT descriptor
362  * @txrx_peer: DP peer context
363  *
364  * Return: none
365  */
366 void dp_tx_comp_process_desc(struct dp_soc *soc,
367 			     struct dp_tx_desc_s *desc,
368 			     struct hal_tx_completion_status *ts,
369 			     struct dp_txrx_peer *txrx_peer);
370 
371 /**
372  * dp_tx_reinject_handler() - Tx Reinject Handler
373  * @soc: datapath soc handle
374  * @vdev: datapath vdev handle
375  * @tx_desc: software descriptor head pointer
376  * @status: Tx completion status from HTT descriptor
377  * @reinject_reason: reinject reason from HTT descriptor
378  *
379  * This function reinjects frames back to Target.
380  * Todo - Host queue needs to be added
381  *
382  * Return: none
383  */
384 void dp_tx_reinject_handler(struct dp_soc *soc,
385 			    struct dp_vdev *vdev,
386 			    struct dp_tx_desc_s *tx_desc,
387 			    uint8_t *status,
388 			    uint8_t reinject_reason);
389 
390 /**
391  * dp_tx_inspect_handler() - Tx Inspect Handler
392  * @soc: datapath soc handle
393  * @vdev: datapath vdev handle
394  * @tx_desc: software descriptor head pointer
395  * @status: Tx completion status from HTT descriptor
396  *
397  * Handles Tx frames sent back to Host for inspection
398  * (ProxyARP)
399  *
400  * Return: none
401  */
402 void dp_tx_inspect_handler(struct dp_soc *soc,
403 			   struct dp_vdev *vdev,
404 			   struct dp_tx_desc_s *tx_desc,
405 			   uint8_t *status);
406 
407 /**
408  * dp_tx_update_peer_basic_stats() - Update peer basic stats
409  * @txrx_peer: Datapath txrx_peer handle
410  * @length: Length of the packet
411  * @tx_status: Tx status from TQM/FW
412  * @update: enhanced flag value present in dp_pdev
413  *
414  * Return: none
415  */
416 void dp_tx_update_peer_basic_stats(struct dp_txrx_peer *txrx_peer,
417 				   uint32_t length, uint8_t tx_status,
418 				   bool update);
419 
420 #ifdef DP_UMAC_HW_RESET_SUPPORT
421 /**
422  * dp_tx_drop() - Drop the frame on a given VAP
423  * @soc: DP soc handle
424  * @vdev_id: id of DP vdev handle
425  * @nbuf: skb
426  *
427  * Drop all the incoming packets
428  *
429  * Return: nbuf
430  */
431 qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
432 
433 /**
434  * dp_tx_exc_drop() - Drop the frame on a given VAP
435  * @soc_hdl: DP soc handle
436  * @vdev_id: id of DP vdev handle
437  * @nbuf: skb
438  * @tx_exc_metadata: Handle that holds exception path meta data
439  *
440  * Drop all the incoming packets
441  *
442  * Return: nbuf
443  */
444 qdf_nbuf_t dp_tx_exc_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
445 			  qdf_nbuf_t nbuf,
446 			  struct cdp_tx_exception_metadata *tx_exc_metadata);
447 #endif
448 #ifdef WLAN_SUPPORT_PPEDS
449 qdf_nbuf_t
450 dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc);
451 #else
452 static inline qdf_nbuf_t
453 dp_ppeds_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc)
454 {
455 	return NULL;
456 }
457 #endif
458 
459 /**
460  * dp_get_updated_tx_desc() - get updated tx_desc value
461  * @psoc: psoc object
462  * @pool_num: Tx desc pool Id
463  * @current_desc: Current Tx Desc value
464  *
465  * In low-mem profiles the number of Tx descriptors in the 4th pool is reduced
466  * to a quarter for memory optimization, gated by the DP_TX_DESC_POOL_OPTIMIZE flag
467  *
468  * Return: Updated Tx Desc value
469  */
470 #ifdef DP_TX_DESC_POOL_OPTIMIZE
471 static inline uint32_t dp_get_updated_tx_desc(struct cdp_ctrl_objmgr_psoc *psoc,
472 					      uint8_t pool_num,
473 					      uint32_t current_desc)
474 {
475 	if (pool_num == 3)
476 		return cfg_get(psoc, CFG_DP_TX_DESC_POOL_3);
477 	else
478 		return current_desc;
479 }
480 #else
481 static inline uint32_t dp_get_updated_tx_desc(struct cdp_ctrl_objmgr_psoc *psoc,
482 					      uint8_t pool_num,
483 					      uint32_t current_desc)
484 {
485 	return current_desc;
486 }
487 #endif
488 
489 #ifdef DP_TX_EXT_DESC_POOL_OPTIMIZE
490 /**
491  * dp_tx_ext_desc_pool_override() - Override tx ext desc pool Id
492  * @desc_pool_id: Desc pool Id
493  *
494  * For low-mem profiles the number of ext_tx_desc pools is reduced to 1.
495  * Since the desc_pool_id in the Tx path is filled based on the CPU core,
496  * dp_tx_ext_desc_pool_override returns desc_pool_id 0 for low-mem
497  * profiles.
498  *
499  * Return: updated tx_ext_desc_pool Id
500  */
501 static inline uint8_t dp_tx_ext_desc_pool_override(uint8_t desc_pool_id)
502 {
503 	return 0;
504 }
505 
506 /**
507  * dp_get_ext_tx_desc_pool_num() - get the number of ext_tx_desc pool
508  * @soc: core txrx main context
509  *
510  * For low-mem profiles the number of ext_tx_desc pools is reduced to 1 for
511  * memory optimization.
512  * When the DP_TX_EXT_DESC_POOL_OPTIMIZE flag is set, dp_get_ext_tx_desc_pool_num
513  * returns the reduced pool count of 1 for low-mem profiles; for other profiles
514  * it returns the same value as the number of tx_desc pools.
515  *
516  * Return: number of ext_tx_desc pool
517  */
518 
519 static inline uint8_t dp_get_ext_tx_desc_pool_num(struct dp_soc *soc)
520 {
521 	return 1;
522 }
523 
524 #else
525 static inline uint8_t dp_tx_ext_desc_pool_override(uint8_t desc_pool_id)
526 {
527 	return desc_pool_id;
528 }
529 
530 static inline uint8_t dp_get_ext_tx_desc_pool_num(struct dp_soc *soc)
531 {
532 	return wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
533 }
534 #endif
535 
536 #ifndef QCA_HOST_MODE_WIFI_DISABLED
537 /**
538  * dp_tso_soc_attach() - TSO Attach handler
539  * @txrx_soc: Opaque Dp handle
540  *
541  * Reserve TSO descriptor buffers
542  *
543  * Return: QDF_STATUS_E_FAILURE on failure or
544  * QDF_STATUS_SUCCESS on success
545  */
546 QDF_STATUS dp_tso_soc_attach(struct cdp_soc_t *txrx_soc);
547 
548 /**
549  * dp_tso_soc_detach() - TSO Detach handler
550  * @txrx_soc: Opaque Dp handle
551  *
552  * Deallocate TSO descriptor buffers
553  *
554  * Return: QDF_STATUS_E_FAILURE on failure or
555  * QDF_STATUS_SUCCESS on success
556  */
557 QDF_STATUS dp_tso_soc_detach(struct cdp_soc_t *txrx_soc);
558 
559 /**
560  * dp_tx_send() - Transmit a frame on a given VAP
561  * @soc_hdl: DP soc handle
562  * @vdev_id: id of DP vdev handle
563  * @nbuf: skb
564  *
565  * Entry point for Core Tx layer (DP_TX) invoked from
566  * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-VAP forwarding
567  * cases.
568  *
569  * Return: NULL on success,
570  *         nbuf when it fails to send
571  */
572 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
573 		      qdf_nbuf_t nbuf);
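
/*
 * Illustrative caller sketch (not part of this header): hard_start_xmit
 * style usage, where a non-NULL return means the nbuf was not consumed
 * and the caller remains responsible for freeing it.
 *
 *	nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
 *	if (nbuf)
 *		qdf_nbuf_free(nbuf);
 */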
574 
575 /**
576  * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in special
577  *      case to avoid check in per-packet path.
578  * @soc_hdl: DP soc handle
579  * @vdev_id: id of DP vdev handle
580  * @nbuf: skb
581  *
582  * Entry point for Core Tx layer (DP_TX) invoked from
583  * hard_start_xmit in OSIF/HDD to transmit packet through dp_tx_send
584  * with special condition to avoid per pkt check in dp_tx_send
585  *
586  * Return: NULL on success,
587  *         nbuf when it fails to send
588  */
589 qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
590 				    uint8_t vdev_id, qdf_nbuf_t nbuf);
591 
592 /**
593  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
594  * @soc_hdl: DP soc handle
595  * @vdev_id: id of DP vdev handle
596  * @nbuf: skb
597  * @tx_exc_metadata: Handle that holds exception path meta data
598  *
599  * Entry point for Core Tx layer (DP_TX) invoked from
600  * hard_start_xmit in OSIF/HDD to transmit frames through fw
601  *
602  * Return: NULL on success,
603  *         nbuf when it fails to send
604  */
605 qdf_nbuf_t
606 dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
607 		     qdf_nbuf_t nbuf,
608 		     struct cdp_tx_exception_metadata *tx_exc_metadata);
609 
610 /**
611  * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
612  *      in exception path in special case to avoid regular exception path check.
613  * @soc_hdl: DP soc handle
614  * @vdev_id: id of DP vdev handle
615  * @nbuf: skb
616  * @tx_exc_metadata: Handle that holds exception path meta data
617  *
618  * Entry point for Core Tx layer (DP_TX) invoked from
619  * hard_start_xmit in OSIF/HDD to transmit frames through fw
620  *
621  * Return: NULL on success,
622  *         nbuf when it fails to send
623  */
624 qdf_nbuf_t
625 dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
626 				   uint8_t vdev_id, qdf_nbuf_t nbuf,
627 				   struct cdp_tx_exception_metadata *tx_exc_metadata);
628 
629 /**
630  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
631  * @soc_hdl: DP soc handle
632  * @vdev_id: DP vdev handle
633  * @nbuf: skb
634  *
635  * Entry point for Core Tx layer (DP_TX) invoked from
636  * hard_start_xmit in OSIF/HDD
637  *
638  * Return: NULL on success,
639  *         nbuf when it fails to send
640  */
641 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
642 			   qdf_nbuf_t nbuf);
643 
644 /**
645  * dp_tx_send_msdu_single() - Setup descriptor and enqueue single MSDU to TCL
646  * @vdev: DP vdev handle
647  * @nbuf: skb
648  * @msdu_info: MSDU information
649  * @peer_id: peer_id of the peer in case of NAWDS frames
650  * @tx_exc_metadata: Handle that holds exception path metadata
651  *
652  * Return: NULL on success,
653  *         nbuf when it fails to send
654  */
655 qdf_nbuf_t
656 dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
657 		       struct dp_tx_msdu_info_s *msdu_info, uint16_t peer_id,
658 		       struct cdp_tx_exception_metadata *tx_exc_metadata);
659 
660 /**
661  * dp_tx_mcast_enhance() - Multicast enhancement on TX
662  * @vdev: DP vdev handle
663  * @nbuf: network buffer to be transmitted
664  *
665  * Return: true on success
666  *         false on failure
667  */
668 bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
669 
670 /**
671  * dp_tx_send_msdu_multiple() - Enqueue multiple MSDUs
672  * @vdev: DP vdev handle
673  * @nbuf: skb
674  * @msdu_info: MSDU info to be setup in MSDU extension descriptor
675  *
676  * Prepare descriptors for multiple MSDUs (TSO segments) and enqueue to TCL
677  *
678  * Return: NULL on success,
679  *         nbuf when it fails to send
680  */
681 #if QDF_LOCK_STATS
682 noinline qdf_nbuf_t
683 dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
684 			 struct dp_tx_msdu_info_s *msdu_info);
685 #else
686 qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
687 				    struct dp_tx_msdu_info_s *msdu_info);
688 #endif
689 #ifdef FEATURE_WLAN_TDLS
690 /**
691  * dp_tx_non_std() - Allow the control-path SW to send data frames
692  * @soc_hdl: Datapath soc handle
693  * @vdev_id: id of vdev
694  * @tx_spec: what non-standard handling to apply to the tx data frames
695  * @msdu_list: NULL-terminated list of tx MSDUs
696  *
697  * Return: NULL on success,
698  *         nbuf when it fails to send
699  */
700 qdf_nbuf_t dp_tx_non_std(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
701 			 enum ol_tx_spec tx_spec, qdf_nbuf_t msdu_list);
702 #endif
703 
704 /**
705  * dp_tx_frame_is_drop() - checks if the packet is loopback
706  * @vdev: DP vdev handle
707  * @srcmac: source MAC address
708  * @dstmac: destination MAC address
709  *
710  * Return: 1 if frame needs to be dropped else 0
711  */
712 int dp_tx_frame_is_drop(struct dp_vdev *vdev, uint8_t *srcmac, uint8_t *dstmac);
713 
714 #ifndef WLAN_SOFTUMAC_SUPPORT
715 /**
716  * dp_tx_comp_handler() - Tx completion handler
717  * @int_ctx: pointer to DP interrupt context
718  * @soc: core txrx main context
719  * @hal_srng: Opaque HAL SRNG pointer
720  * @ring_id: completion ring id
721  * @quota: No. of packets/descriptors that can be serviced in one loop
722  *
723  * This function will collect hardware release ring element contents and
724  * handle descriptor contents. Based on contents, free packet or handle error
725  * conditions
726  *
727  * Return: Number of TX completions processed
728  */
729 uint32_t dp_tx_comp_handler(struct dp_intr *int_ctx, struct dp_soc *soc,
730 			    hal_ring_handle_t hal_srng, uint8_t ring_id,
731 			    uint32_t quota);
732 #endif
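
/*
 * Illustrative sketch (assumed usage): draining Tx completions from the
 * interrupt/NAPI context until the budget is exhausted; 'int_ctx' and
 * 'budget' are placeholders supplied by the caller.
 *
 *	uint32_t work_done;
 *
 *	work_done = dp_tx_comp_handler(int_ctx, soc,
 *				       soc->tx_comp_ring[ring_id].hal_srng,
 *				       ring_id, budget);
 *	budget -= work_done;
 */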
733 
734 void
735 dp_tx_comp_process_desc_list(struct dp_soc *soc,
736 			     struct dp_tx_desc_s *comp_head, uint8_t ring_id);
737 
738 QDF_STATUS
739 dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
740 
741 QDF_STATUS
742 dp_tx_prepare_send_igmp_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
743 
744 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
745 
746 #if defined(QCA_HOST_MODE_WIFI_DISABLED) || !defined(ATH_SUPPORT_IQUE)
747 static inline void dp_tx_me_exit(struct dp_pdev *pdev)
748 {
749 	return;
750 }
751 #endif
752 
753 /**
754  * dp_tx_pdev_init() - dp tx pdev init
755  * @pdev: physical device instance
756  *
757  * Return: QDF_STATUS_SUCCESS: success
758  *         QDF_STATUS_E_RESOURCES: Error return
759  */
760 static inline QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev)
761 {
762 	struct dp_soc *soc = pdev->soc;
763 
764 	/* Initialize Flow control counters */
765 	qdf_atomic_init(&pdev->num_tx_outstanding);
766 	pdev->tx_descs_max = 0;
767 	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
768 		/* Initialize descriptors in TCL Ring */
769 		hal_tx_init_data_ring(soc->hal_soc,
770 				soc->tcl_data_ring[pdev->pdev_id].hal_srng);
771 	}
772 
773 	return QDF_STATUS_SUCCESS;
774 }
775 
776 /**
777  * dp_tx_prefetch_hw_sw_nbuf_desc() - function to prefetch HW and SW desc
778  * @soc: Handle to HAL Soc structure
779  * @hal_soc: HAL SOC handle
780  * @num_avail_for_reap: descriptors available for reap
781  * @hal_ring_hdl: ring pointer
782  * @last_prefetched_hw_desc: pointer to the last prefetched HW descriptor
783  * @last_prefetched_sw_desc: pointer to the last prefetched SW desc
784  * @last_hw_desc: pointer to last HW desc
785  *
786  * Return: None
787  */
788 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
789 static inline
790 void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
791 				    hal_soc_handle_t hal_soc,
792 				    uint32_t num_avail_for_reap,
793 				    hal_ring_handle_t hal_ring_hdl,
794 				    void **last_prefetched_hw_desc,
795 				    struct dp_tx_desc_s
796 				    **last_prefetched_sw_desc,
797 				    void *last_hw_desc)
798 {
799 	if (*last_prefetched_sw_desc) {
800 		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf);
801 		qdf_prefetch((uint8_t *)(*last_prefetched_sw_desc)->nbuf + 64);
802 	}
803 
804 	if (qdf_unlikely(last_hw_desc &&
805 			 (*last_prefetched_hw_desc == last_hw_desc)))
806 		return;
807 
808 	if (num_avail_for_reap && *last_prefetched_hw_desc) {
809 		soc->arch_ops.tx_comp_get_params_from_hal_desc(soc,
810 						       *last_prefetched_hw_desc,
811 						       last_prefetched_sw_desc);
812 
813 		if ((uintptr_t)*last_prefetched_hw_desc & 0x3f)
814 			*last_prefetched_hw_desc =
815 				hal_srng_dst_prefetch_next_cached_desc(
816 					hal_soc,
817 					hal_ring_hdl,
818 					(uint8_t *)*last_prefetched_hw_desc);
819 		else
820 			*last_prefetched_hw_desc =
821 				hal_srng_dst_get_next_32_byte_desc(hal_soc,
822 					hal_ring_hdl,
823 					(uint8_t *)*last_prefetched_hw_desc);
824 	}
825 }
826 #else
827 static inline
828 void dp_tx_prefetch_hw_sw_nbuf_desc(struct dp_soc *soc,
829 				    hal_soc_handle_t hal_soc,
830 				    uint32_t num_avail_for_reap,
831 				    hal_ring_handle_t hal_ring_hdl,
832 				    void **last_prefetched_hw_desc,
833 				    struct dp_tx_desc_s
834 				    **last_prefetched_sw_desc,
835 				    void *last_hw_desc)
836 {
837 }
838 #endif
839 
840 #ifndef FEATURE_WDS
841 static inline void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
842 {
843 	return;
844 }
845 #endif
846 
847 #ifndef QCA_MULTIPASS_SUPPORT
848 static inline
849 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
850 			     qdf_nbuf_t nbuf,
851 			     struct dp_tx_msdu_info_s *msdu_info)
852 {
853 	return true;
854 }
855 
856 static inline
857 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev)
858 {
859 }
860 
861 #else
862 /**
863  * dp_tx_multipass_process() - Process vlan frames in tx path
864  * @soc: dp soc handle
865  * @vdev: DP vdev handle
866  * @nbuf: skb
867  * @msdu_info: msdu descriptor
868  *
869  * Return: status whether frame needs to be dropped or transmitted
870  */
871 bool dp_tx_multipass_process(struct dp_soc *soc, struct dp_vdev *vdev,
872 			     qdf_nbuf_t nbuf,
873 			     struct dp_tx_msdu_info_s *msdu_info);
874 
875 /**
876  * dp_tx_vdev_multipass_deinit() - set vlan map for vdev
877  * @vdev: pointer to vdev
878  *
879  * return: void
880  */
881 void dp_tx_vdev_multipass_deinit(struct dp_vdev *vdev);
882 
883 /**
884  * dp_tx_add_groupkey_metadata() - Add group key in metadata
885  * @vdev: DP vdev handle
886  * @msdu_info: MSDU info to be setup in MSDU descriptor
887  * @group_key: Group key index programmed in metadata
888  *
889  * Return: void
890  */
891 void dp_tx_add_groupkey_metadata(struct dp_vdev *vdev,
892 				 struct dp_tx_msdu_info_s *msdu_info,
893 				 uint16_t group_key);
894 #endif
895 
896 /**
897  * dp_tx_hw_to_qdf()- convert hw status to qdf status
898  * @status: hw status
899  *
900  * Return: qdf tx rx status
901  */
902 static inline enum qdf_dp_tx_rx_status dp_tx_hw_to_qdf(uint16_t status)
903 {
904 	switch (status) {
905 	case HAL_TX_TQM_RR_FRAME_ACKED:
906 		return QDF_TX_RX_STATUS_OK;
907 	case HAL_TX_TQM_RR_REM_CMD_TX:
908 		return QDF_TX_RX_STATUS_NO_ACK;
909 	case HAL_TX_TQM_RR_REM_CMD_REM:
910 	case HAL_TX_TQM_RR_REM_CMD_NOTX:
911 	case HAL_TX_TQM_RR_REM_CMD_AGED:
912 		return QDF_TX_RX_STATUS_FW_DISCARD;
913 	default:
914 		return QDF_TX_RX_STATUS_DEFAULT;
915 	}
916 }
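
/*
 * Illustrative sketch: mapping the TQM release reason from a parsed
 * completion ('ts' is a placeholder hal_tx_completion_status) into the
 * qdf status before reporting it upwards.
 *
 *	enum qdf_dp_tx_rx_status dp_status = dp_tx_hw_to_qdf(ts->status);
 *
 *	if (dp_status == QDF_TX_RX_STATUS_OK)
 *		acked_count++;	// hypothetical counter
 */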
917 
918 #ifndef QCA_HOST_MODE_WIFI_DISABLED
919 /**
920  * dp_tx_get_queue() - Returns Tx queue IDs to be used for this Tx frame
921  * @vdev: DP Virtual device handle
922  * @nbuf: Buffer pointer
923  * @queue: queue ids container for nbuf
924  *
925  * A TX packet queue has two components: the software descriptor pool id and
926  * the DMA ring id. Based on the tx feature and hardware configuration, the
927  * queue id combination can differ.
928  * For example:
929  * with XPS enabled, all TX descriptor pools and DMA rings are assigned per CPU id;
930  * with no XPS (lock-based resource protection), descriptor pool ids are different
931  * for each vdev while the DMA ring id is the same as the single pdev id.
932  *
933  * Return: None
934  */
935 #ifdef QCA_OL_TX_MULTIQ_SUPPORT
936 #if defined(IPA_OFFLOAD) && defined(QCA_IPA_LL_TX_FLOW_CONTROL)
937 #ifdef IPA_WDI3_TX_TWO_PIPES
938 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
939 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
940 {
941 	queue->ring_id = qdf_get_cpu();
942 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
943 		if ((queue->ring_id == IPA_TCL_DATA_RING_IDX) ||
944 		    (queue->ring_id == IPA_TX_ALT_RING_IDX))
945 			queue->ring_id = 0;
946 
947 	queue->desc_pool_id = queue->ring_id;
948 }
949 #else
950 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
951 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
952 {
953 	queue->ring_id = qdf_get_cpu();
954 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled)
955 		if (queue->ring_id == IPA_TCL_DATA_RING_IDX)
956 			queue->ring_id = 0;
957 
958 	queue->desc_pool_id = queue->ring_id;
959 }
960 #endif
961 #else
962 #ifdef WLAN_TX_PKT_CAPTURE_ENH
963 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
964 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
965 {
966 	if (qdf_unlikely(vdev->is_override_rbm_id))
967 		queue->ring_id = vdev->rbm_id;
968 	else
969 		queue->ring_id = qdf_get_cpu();
970 
971 	queue->desc_pool_id = queue->ring_id;
972 }
973 #else
974 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
975 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
976 {
977 	queue->ring_id = qdf_get_cpu();
978 	queue->desc_pool_id = queue->ring_id;
979 }
980 
981 #endif
982 #endif
983 
984 /**
985  * dp_tx_get_hal_ring_hdl() - Get the hal_tx_ring_hdl for data transmission
986  * @soc: DP soc structure pointer
987  * @ring_id: Transmit Queue/ring_id to be used when XPS is enabled
988  *
989  * Return: HAL ring handle
990  */
991 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
992 						       uint8_t ring_id)
993 {
994 	if (ring_id == soc->num_tcl_data_rings)
995 		return soc->tcl_cmd_credit_ring.hal_srng;
996 
997 	return soc->tcl_data_ring[ring_id].hal_srng;
998 }
999 
1000 #else /* QCA_OL_TX_MULTIQ_SUPPORT */
1001 
1002 #ifdef TX_MULTI_TCL
1003 #ifdef IPA_OFFLOAD
1004 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
1005 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
1006 {
1007 	/* get flow id */
1008 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
1009 	if (vdev->pdev->soc->wlan_cfg_ctx->ipa_enabled &&
1010 	    !ipa_config_is_opt_wifi_dp_enabled())
1011 		queue->ring_id = DP_TX_GET_RING_ID(vdev);
1012 	else
1013 		queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
1014 					vdev->pdev->soc->num_tcl_data_rings);
1015 }
1016 #else
1017 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
1018 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
1019 {
1020 	/* get flow id */
1021 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
1022 	queue->ring_id = (qdf_nbuf_get_queue_mapping(nbuf) %
1023 				vdev->pdev->soc->num_tcl_data_rings);
1024 }
1025 #endif
1026 #else
1027 static inline void dp_tx_get_queue(struct dp_vdev *vdev,
1028 				   qdf_nbuf_t nbuf, struct dp_tx_queue *queue)
1029 {
1030 	/* get flow id */
1031 	queue->desc_pool_id = DP_TX_GET_DESC_POOL_ID(vdev);
1032 	queue->ring_id = DP_TX_GET_RING_ID(vdev);
1033 }
1034 #endif
1035 
1036 static inline hal_ring_handle_t dp_tx_get_hal_ring_hdl(struct dp_soc *soc,
1037 						       uint8_t ring_id)
1038 {
1039 	return soc->tcl_data_ring[ring_id].hal_srng;
1040 }
1041 #endif
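
/*
 * Illustrative sketch: resolving the Tx queue for an nbuf and fetching
 * the matching TCL ring handle; 'vdev', 'nbuf' and 'soc' are
 * placeholders for the caller's context.
 *
 *	struct dp_tx_queue queue;
 *	hal_ring_handle_t hal_ring_hdl;
 *
 *	dp_tx_get_queue(vdev, nbuf, &queue);
 *	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, queue.ring_id);
 */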
1042 
1043 #ifdef QCA_OL_TX_LOCK_LESS_ACCESS
1044 /**
1045  * dp_tx_hal_ring_access_start() - hal_tx_ring access for data transmission
1046  * @soc: DP soc structure pointer
1047  * @hal_ring_hdl: HAL ring handle
1048  *
1049  * Return: None
1050  */
1051 static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
1052 					      hal_ring_handle_t hal_ring_hdl)
1053 {
1054 	return hal_srng_access_start_unlocked(soc->hal_soc, hal_ring_hdl);
1055 }
1056 
1057 /**
1058  * dp_tx_hal_ring_access_end() - hal_tx_ring access for data transmission
1059  * @soc: DP soc structure pointer
1060  * @hal_ring_hdl: HAL ring handle
1061  *
1062  * Return: None
1063  */
1064 static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
1065 					     hal_ring_handle_t hal_ring_hdl)
1066 {
1067 	hal_srng_access_end_unlocked(soc->hal_soc, hal_ring_hdl);
1068 }
1069 
1070 /**
1071  * dp_tx_hal_ring_access_end_reap() - hal_tx_ring access for data transmission
1072  * @soc: DP soc structure pointer
1073  * @hal_ring_hdl: HAL ring handle
1074  *
1075  * Return: None
1076  */
1077 static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
1078 						  hal_ring_handle_t
1079 						  hal_ring_hdl)
1080 {
1081 }
1082 
1083 #else
1084 static inline int dp_tx_hal_ring_access_start(struct dp_soc *soc,
1085 					      hal_ring_handle_t hal_ring_hdl)
1086 {
1087 	return hal_srng_access_start(soc->hal_soc, hal_ring_hdl);
1088 }
1089 
1090 static inline void dp_tx_hal_ring_access_end(struct dp_soc *soc,
1091 					     hal_ring_handle_t hal_ring_hdl)
1092 {
1093 	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
1094 }
1095 
1096 static inline void dp_tx_hal_ring_access_end_reap(struct dp_soc *soc,
1097 						  hal_ring_handle_t
1098 						  hal_ring_hdl)
1099 {
1100 	hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
1101 }
1102 #endif
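
/*
 * Illustrative sketch (assumed usage): the begin/end pairing around a
 * burst of TCL descriptor writes. With QCA_OL_TX_LOCK_LESS_ACCESS the
 * same calls compile down to the unlocked SRNG variants.
 *
 *	if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl)))
 *		return QDF_STATUS_E_FAILURE;
 *	// ... post one or more TCL descriptors ...
 *	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
 */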
1103 
1104 #ifdef ATH_TX_PRI_OVERRIDE
1105 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf) \
1106 	((_msdu_info)->tid = qdf_nbuf_get_priority(_nbuf))
1107 #else
1108 #define DP_TX_TID_OVERRIDE(_msdu_info, _nbuf)
1109 #endif
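
/*
 * Illustrative sketch: applying the optional TID override from the nbuf
 * priority once msdu_info is populated; this expands to a no-op unless
 * ATH_TX_PRI_OVERRIDE is defined.
 *
 *	DP_TX_TID_OVERRIDE(&msdu_info, nbuf);
 */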
1110 
1111 /* TODO TX_FEATURE_NOT_YET */
1112 static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
1113 {
1114 	return;
1115 }
1116 /* TODO TX_FEATURE_NOT_YET */
1117 
1118 /**
1119  * dp_tx_desc_flush() - release resources associated
1120  *                      to TX Desc
1121  *
1122  * @pdev: Handle to DP pdev structure
1123  * @vdev: virtual device instance
1124  * NULL: no specific vdev is required; check all allocated TX descriptors
1125  * on this pdev.
1126  * Non-NULL: only check the allocated TX descriptors associated with this vdev.
1127  *
1128  * @force_free:
1129  * true: flush the TX desc.
1130  * false: only reset the Vdev in each allocated TX desc
1131  * that associated to current Vdev.
1132  *
1133  * This function will go through the TX desc pool to flush
1134  * the outstanding TX data or reset Vdev to NULL in associated TX
1135  * Desc.
1136  */
1137 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
1138 		      bool force_free);
1139 
1140 /**
1141  * dp_tx_vdev_attach() - attach vdev to dp tx
1142  * @vdev: virtual device instance
1143  *
1144  * Return: QDF_STATUS_SUCCESS: success
1145  *         QDF_STATUS_E_RESOURCES: Error return
1146  */
1147 QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev);
1148 
1149 /**
1150  * dp_tx_vdev_detach() - detach vdev from dp tx
1151  * @vdev: virtual device instance
1152  *
1153  * Return: QDF_STATUS_SUCCESS: success
1154  *         QDF_STATUS_E_RESOURCES: Error return
1155  */
1156 QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev);
1157 
1158 /**
1159  * dp_tx_vdev_update_search_flags() - Update vdev flags as per opmode
1160  * @vdev: virtual device instance
1161  *
1162  * Return: void
1163  *
1164  */
1165 void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev);
1166 
1167 /**
1168  * dp_soc_tx_desc_sw_pools_alloc() - Allocate tx descriptor pool memory
1169  * @soc: core txrx main context
1170  *
1171  * This function allocates memory for following descriptor pools
1172  * 1. regular sw tx descriptor pools (static pools)
1173  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
1174  * 3. TSO descriptor pools
1175  *
1176  * Return: QDF_STATUS_SUCCESS: success
1177  *         QDF_STATUS_E_RESOURCES: Error return
1178  */
1179 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc);
1180 
1181 /**
1182  * dp_soc_tx_desc_sw_pools_init() - Initialise TX descriptor pools
1183  * @soc: core txrx main context
1184  *
1185  * This function initializes the following TX descriptor pools
1186  * 1. regular sw tx descriptor pools (static pools)
1187  * 2. TX extension descriptor pools (ME, RAW, TSO etc...)
1188  * 3. TSO descriptor pools
1189  *
1190  * Return: QDF_STATUS_SUCCESS: success
1191  *	   QDF_STATUS_E_RESOURCES: Error return
1192  */
1193 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc);
1194 
1195 /**
1196  * dp_soc_tx_desc_sw_pools_free() - free all TX descriptors
1197  * @soc: core txrx main context
1198  *
1199  * This function frees all tx related descriptors as below
1200  * 1. Regular TX descriptors (static pools)
1201  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
1202  * 3. TSO descriptors
1203  *
1204  */
1205 void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc);
1206 
1207 /**
1208  * dp_soc_tx_desc_sw_pools_deinit() - de-initialize all TX descriptors
1209  * @soc: core txrx main context
1210  *
1211  * This function de-initializes all tx related descriptors as below
1212  * 1. Regular TX descriptors (static pools)
1213  * 2. extension TX descriptors (used for ME, RAW, TSO etc...)
1214  * 3. TSO descriptors
1215  *
1216  */
1217 void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc);
1218 
1219 #ifndef WLAN_SOFTUMAC_SUPPORT
1220 /**
1221  * dp_handle_wbm_internal_error() - handles wbm_internal_error case
1222  * @soc: core DP main context
1223  * @hal_desc: hal descriptor
1224  * @buf_type: indicates if the buffer is of type link disc or msdu
1225  *
1226  * wbm_internal_error is seen in following scenarios :
1227  *
1228  * 1.  Null pointers detected in WBM_RELEASE_RING descriptors
1229  * 2.  Null pointers detected during delinking process
1230  *
1231  * Some null pointer cases:
1232  *
1233  * a. MSDU buffer pointer is NULL
1234  * b. Next_MSDU_Link_Desc pointer is NULL, with no last msdu flag
1235  * c. MSDU buffer pointer is NULL or Next_Link_Desc pointer is NULL
1236  *
1237  * Return: None
1238  */
1239 void
1240 dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
1241 			     uint32_t buf_type);
1242 #endif
1243 #else /* QCA_HOST_MODE_WIFI_DISABLED */
1244 
1245 static inline
1246 QDF_STATUS dp_soc_tx_desc_sw_pools_alloc(struct dp_soc *soc)
1247 {
1248 	return QDF_STATUS_SUCCESS;
1249 }
1250 
1251 static inline
1252 QDF_STATUS dp_soc_tx_desc_sw_pools_init(struct dp_soc *soc)
1253 {
1254 	return QDF_STATUS_SUCCESS;
1255 }
1256 
1257 static inline void dp_soc_tx_desc_sw_pools_free(struct dp_soc *soc)
1258 {
1259 }
1260 
1261 static inline void dp_soc_tx_desc_sw_pools_deinit(struct dp_soc *soc)
1262 {
1263 }
1264 
1265 static inline
1266 void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
1267 		      bool force_free)
1268 {
1269 }
1270 
1271 static inline QDF_STATUS dp_tx_vdev_attach(struct dp_vdev *vdev)
1272 {
1273 	return QDF_STATUS_SUCCESS;
1274 }
1275 
1276 static inline QDF_STATUS dp_tx_vdev_detach(struct dp_vdev *vdev)
1277 {
1278 	return QDF_STATUS_SUCCESS;
1279 }
1280 
1281 static inline void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
1282 {
1283 }
1284 
1285 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1286 
1287 #if defined(QCA_SUPPORT_LATENCY_CAPTURE) || \
1288 	defined(QCA_TX_CAPTURE_SUPPORT) || \
1289 	defined(QCA_MCOPY_SUPPORT)
1290 #ifdef FEATURE_PERPKT_INFO
1291 
1292 /**
1293  * dp_get_completion_indication_for_stack() - send completion to stack
1294  * @soc : dp_soc handle
1295  * @pdev: dp_pdev handle
1296  * @txrx_peer: dp peer handle
1297  * @ts: transmit completion status structure
1298  * @netbuf: Buffer pointer for free
1299  * @time_latency:
1300  *
1301  * This function is used to indicate whether the buffer needs to be
1302  * sent to the stack for freeing or not
1303  *
1304  * Return: QDF_STATUS
1305  */
1306 QDF_STATUS
1307 dp_get_completion_indication_for_stack(struct dp_soc *soc,
1308 				       struct dp_pdev *pdev,
1309 				       struct dp_txrx_peer *txrx_peer,
1310 				       struct hal_tx_completion_status *ts,
1311 				       qdf_nbuf_t netbuf,
1312 				       uint64_t time_latency);
1313 
1314 /**
1315  * dp_send_completion_to_stack() - send completion to stack
1316  * @soc :  dp_soc handle
1317  * @pdev:  dp_pdev handle
1318  * @peer_id: peer_id of the peer for which completion came
1319  * @ppdu_id: ppdu_id
1320  * @netbuf: Buffer pointer for free
1321  *
1322  * This function is used to send completion to stack
1323  * to free buffer
1324  *
1325  * Return: QDF_STATUS
1326  */
1327 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
1328 			    uint16_t peer_id, uint32_t ppdu_id,
1329 			    qdf_nbuf_t netbuf);
1330 #endif
1331 #else
1332 static inline
1333 QDF_STATUS dp_get_completion_indication_for_stack(struct dp_soc *soc,
1334 				       struct dp_pdev *pdev,
1335 				       struct dp_txrx_peer *peer,
1336 				       struct hal_tx_completion_status *ts,
1337 				       qdf_nbuf_t netbuf,
1338 				       uint64_t time_latency)
1339 {
1340 	return QDF_STATUS_E_NOSUPPORT;
1341 }
1342 
1343 static inline
1344 void dp_send_completion_to_stack(struct dp_soc *soc,  struct dp_pdev *pdev,
1345 			    uint16_t peer_id, uint32_t ppdu_id,
1346 			    qdf_nbuf_t netbuf)
1347 {
1348 }
1349 #endif
1350 
1351 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
1352 /**
1353  * dp_send_completion_to_pkt_capture() - send tx completion to packet capture
1354  * @soc: dp_soc handle
1355  * @desc: Tx Descriptor
1356  * @ts: HAL Tx completion descriptor contents
1357  *
1358  * This function is used to send tx completion to packet capture
1359  */
1360 
1361 void dp_send_completion_to_pkt_capture(struct dp_soc *soc,
1362 				       struct dp_tx_desc_s *desc,
1363 				       struct hal_tx_completion_status *ts);
1364 #else
1365 static inline void
1366 dp_send_completion_to_pkt_capture(struct dp_soc *soc,
1367 				  struct dp_tx_desc_s *desc,
1368 				  struct hal_tx_completion_status *ts)
1369 {
1370 }
1371 #endif
1372 
1373 #ifndef QCA_HOST_MODE_WIFI_DISABLED
1374 #ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
1375 /**
1376  * dp_tx_update_stats() - Update soc level tx stats
1377  * @soc: DP soc handle
1378  * @tx_desc: TX descriptor reference
1379  * @ring_id: TCL ring id
1380  *
1381  * Return: none
1382  */
1383 void dp_tx_update_stats(struct dp_soc *soc,
1384 			struct dp_tx_desc_s *tx_desc,
1385 			uint8_t ring_id);
1386 
1387 /**
1388  * dp_tx_attempt_coalescing() - Check and attempt TCL register write coalescing
1389  * @soc: Datapath soc handle
1390  * @vdev: DP vdev handle
1391  * @tx_desc: tx packet descriptor
1392  * @tid: TID for pkt transmission
1393  * @msdu_info: MSDU info of tx packet
1394  * @ring_id: TCL ring id
1395  *
1396  * Return: 1, if coalescing is to be done
1397  *	    0, if coalescing is not to be done
1398  */
1399 int
1400 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1401 			 struct dp_tx_desc_s *tx_desc,
1402 			 uint8_t tid,
1403 			 struct dp_tx_msdu_info_s *msdu_info,
1404 			 uint8_t ring_id);
1405 
1406 /**
1407  * dp_tx_ring_access_end() - HAL ring access end for data transmission
1408  * @soc: Datapath soc handle
1409  * @hal_ring_hdl: HAL ring handle
1410  * @coalesce: Coalesce the current write or not
1411  *
1412  * Return: none
1413  */
1414 void
1415 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1416 		      int coalesce);
1417 #else
1418 /**
1419  * dp_tx_update_stats() - Update soc level tx stats
1420  * @soc: DP soc handle
1421  * @tx_desc: TX descriptor reference
1422  * @ring_id: TCL ring id
1423  *
1424  * Return: none
1425  */
1426 static inline void dp_tx_update_stats(struct dp_soc *soc,
1427 				      struct dp_tx_desc_s *tx_desc,
1428 				      uint8_t ring_id){ }
1429 
1430 static inline void
1431 dp_tx_ring_access_end(struct dp_soc *soc, hal_ring_handle_t hal_ring_hdl,
1432 		      int coalesce)
1433 {
1434 	dp_tx_hal_ring_access_end(soc, hal_ring_hdl);
1435 }
1436 
1437 static inline int
1438 dp_tx_attempt_coalescing(struct dp_soc *soc, struct dp_vdev *vdev,
1439 			 struct dp_tx_desc_s *tx_desc,
1440 			 uint8_t tid,
1441 			 struct dp_tx_msdu_info_s *msdu_info,
1442 			 uint8_t ring_id)
1443 {
1444 	return 0;
1445 }
1446 
1447 #endif /* WLAN_DP_FEATURE_SW_LATENCY_MGR */
1448 
1449 #ifdef FEATURE_RUNTIME_PM
1450 /**
1451  * dp_set_rtpm_tput_policy_requirement() - Update RTPM throughput policy
1452  * @soc_hdl: DP soc handle
1453  * @is_high_tput: flag to indicate whether throughput is high
1454  *
1455  * Return: none
1456  */
1457 static inline
1458 void dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
1459 					 bool is_high_tput)
1460 {
1461 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
1462 
1463 	qdf_atomic_set(&soc->rtpm_high_tput_flag, is_high_tput);
1464 }
1465 
1466 /**
1467  * dp_tx_ring_access_end_wrapper() - Wrapper for ring access end
1468  * @soc: Datapath soc handle
1469  * @hal_ring_hdl: HAL ring handle
1470  * @coalesce: Coalesce the current write or not
1471  *
1472  * Feature-specific wrapper for HAL ring access end for data
1473  * transmission
1474  *
1475  * Return: none
1476  */
1477 void
1478 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1479 			      hal_ring_handle_t hal_ring_hdl,
1480 			      int coalesce);
1481 #else
1482 #ifdef DP_POWER_SAVE
1483 void
1484 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1485 			      hal_ring_handle_t hal_ring_hdl,
1486 			      int coalesce);
1487 #else
1488 static inline void
1489 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
1490 			      hal_ring_handle_t hal_ring_hdl,
1491 			      int coalesce)
1492 {
1493 	dp_tx_ring_access_end(soc, hal_ring_hdl, coalesce);
1494 }
1495 #endif
1496 
1497 static inline void
1498 dp_set_rtpm_tput_policy_requirement(struct cdp_soc_t *soc_hdl,
1499 				    bool is_high_tput)
1500 { }
1501 #endif
1502 #endif /* QCA_HOST_MODE_WIFI_DISABLED */
1503 
1504 #ifdef DP_TX_HW_DESC_HISTORY
1505 static inline void
1506 dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
1507 			 hal_ring_handle_t hal_ring_hdl,
1508 			 struct dp_soc *soc, uint8_t ring_id)
1509 {
1510 	struct dp_tx_hw_desc_history *tx_hw_desc_history =
1511 						&soc->tx_hw_desc_history;
1512 	struct dp_tx_hw_desc_evt *evt;
1513 	uint32_t idx = 0;
1514 	uint16_t slot = 0;
1515 
1516 	if (!tx_hw_desc_history->allocated)
1517 		return;
1518 
1519 	dp_get_frag_hist_next_atomic_idx(&tx_hw_desc_history->index, &idx,
1520 					 &slot,
1521 					 DP_TX_HW_DESC_HIST_SLOT_SHIFT,
1522 					 DP_TX_HW_DESC_HIST_PER_SLOT_MAX,
1523 					 DP_TX_HW_DESC_HIST_MAX);
1524 
1525 	evt = &tx_hw_desc_history->entry[slot][idx];
1526 	qdf_mem_copy(evt->tcl_desc, hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
1527 	evt->posted = qdf_get_log_timestamp();
1528 	evt->tcl_ring_id = ring_id;
1529 	hal_get_sw_hptp(soc->hal_soc, hal_ring_hdl, &evt->tp, &evt->hp);
1530 }
1531 #else
1532 static inline void
1533 dp_tx_hw_desc_update_evt(uint8_t *hal_tx_desc_cached,
1534 			 hal_ring_handle_t hal_ring_hdl,
1535 			 struct dp_soc *soc, uint8_t ring_id)
1536 {
1537 }
1538 #endif
1539 
1540 #if defined(WLAN_FEATURE_TSF_AUTO_REPORT) || defined(WLAN_CONFIG_TX_DELAY)
1541 /**
1542  * dp_tx_compute_hw_delay_us() - Compute hardware Tx completion delay
1543  * @ts: Tx completion status
1544  * @delta_tsf: Difference between TSF clock and qtimer
1545  * @delay_us: Delay in microseconds
1546  *
1547  * Return: QDF_STATUS_SUCCESS   : Success
1548  *         QDF_STATUS_E_INVAL   : Tx completion status is invalid or
1549  *                                delay_us is NULL
1550  *         QDF_STATUS_E_FAILURE : Error in delay calculation
1551  */
1552 QDF_STATUS
1553 dp_tx_compute_hw_delay_us(struct hal_tx_completion_status *ts,
1554 			  uint32_t delta_tsf,
1555 			  uint32_t *delay_us);
1556 
1557 /**
1558  * dp_set_delta_tsf() - Set delta_tsf to dp_soc structure
1559  * @soc_hdl: cdp soc pointer
1560  * @vdev_id: vdev id
1561  * @delta_tsf: difference between TSF clock and qtimer
1562  *
1563  * Return: None
1564  */
1565 void dp_set_delta_tsf(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1566 		      uint32_t delta_tsf);
1567 #endif
1568 #ifdef WLAN_FEATURE_TSF_UPLINK_DELAY
1569 /**
1570  * dp_set_tsf_ul_delay_report() - Enable or disable reporting uplink delay
1571  * @soc_hdl: cdp soc pointer
1572  * @vdev_id: vdev id
1573  * @enable: true to enable and false to disable
1574  *
1575  * Return: QDF_STATUS
1576  */
1577 QDF_STATUS dp_set_tsf_ul_delay_report(struct cdp_soc_t *soc_hdl,
1578 				      uint8_t vdev_id, bool enable);
1579 
1580 /**
1581  * dp_get_uplink_delay() - Get uplink delay value
1582  * @soc_hdl: cdp soc pointer
1583  * @vdev_id: vdev id
1584  * @val: pointer to save uplink delay value
1585  *
1586  * Return: QDF_STATUS
1587  */
1588 QDF_STATUS dp_get_uplink_delay(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
1589 			       uint32_t *val);
1590 #endif /* WLAN_FEATURE_TSF_UPLINK_DELAY */

/**
 * dp_tx_pkt_tracepoints_enabled() - Check whether any Tx packet tracepoint
 * is enabled
 *
 * Return: true if any Tx packet tracepoint is enabled, else false
 */
static inline
bool dp_tx_pkt_tracepoints_enabled(void)
{
	return (qdf_trace_dp_tx_comp_tcp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_udp_pkt_enabled() ||
		qdf_trace_dp_tx_comp_pkt_enabled());
}

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
static inline
struct dp_tx_desc_pool_s *dp_get_tx_desc_pool(struct dp_soc *soc,
					      uint8_t pool_id)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();
	return dp_global->tx_desc[soc->arch_id][pool_id];
}

static inline
struct dp_tx_desc_pool_s *dp_get_spcl_tx_desc_pool(struct dp_soc *soc,
						   uint8_t pool_id)
{
	struct dp_global_context *dp_global = NULL;

	dp_global = wlan_objmgr_get_global_ctx();
	return dp_global->spcl_tx_desc[soc->arch_id][pool_id];
}
#else
static inline
struct dp_tx_desc_pool_s *dp_get_tx_desc_pool(struct dp_soc *soc,
					      uint8_t pool_id)
{
	return &soc->tx_desc[pool_id];
}

static inline
struct dp_tx_desc_pool_s *dp_get_spcl_tx_desc_pool(struct dp_soc *soc,
						   uint8_t pool_id)
{
	return &soc->tx_desc[pool_id];
}
#endif
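
/*
 * Illustrative usage sketch (not part of this header): callers resolve the
 * descriptor pool the same way whether QCA_SUPPORT_DP_GLOBAL_CTX maps it to
 * the shared global context or to the per-soc array.
 *
 *	struct dp_tx_desc_pool_s *pool;
 *
 *	pool = dp_get_tx_desc_pool(soc, tx_desc->pool_id);
 */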

#ifdef DP_TX_TRACKING
/**
 * dp_tx_desc_set_timestamp() - set timestamp in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Return: None
 */
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
	tx_desc->timestamp_tick = qdf_system_ticks();
}

/**
 * dp_tx_desc_check_corruption() - Verify magic pattern in tx descriptor
 * @tx_desc: tx descriptor
 *
 * Check for corruption in the tx descriptor; if the magic pattern does
 * not match, trigger self recovery.
 *
 * Return: None
 */
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc);
#else
static inline
void dp_tx_desc_set_timestamp(struct dp_tx_desc_s *tx_desc)
{
}

static inline
void dp_tx_desc_check_corruption(struct dp_tx_desc_s *tx_desc)
{
}
#endif

#ifndef CONFIG_SAWF
static inline bool dp_sawf_tag_valid_get(qdf_nbuf_t nbuf)
{
	return false;
}
#endif

#ifdef HW_TX_DELAY_STATS_ENABLE
/**
 * dp_tx_desc_set_ktimestamp() - set kernel timestamp in tx descriptor
 * @vdev: DP vdev handle
 * @tx_desc: tx descriptor
 *
 * Return: true when the descriptor is timestamped, false otherwise
 */
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
	    qdf_unlikely(dp_is_vdev_tx_delay_stats_enabled(vdev)) ||
	    qdf_unlikely(wlan_cfg_is_peer_jitter_stats_enabled(vdev->pdev->soc->wlan_cfg_ctx))) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
#else
static inline
bool dp_tx_desc_set_ktimestamp(struct dp_vdev *vdev,
			       struct dp_tx_desc_s *tx_desc)
{
	if (qdf_unlikely(vdev->pdev->delay_stats_flag) ||
	    qdf_unlikely(vdev->pdev->soc->wlan_cfg_ctx->pext_stats_enabled) ||
	    qdf_unlikely(dp_tx_pkt_tracepoints_enabled()) ||
	    qdf_unlikely(vdev->pdev->soc->peerstats_enabled) ||
	    qdf_unlikely(wlan_cfg_is_peer_jitter_stats_enabled(vdev->pdev->soc->wlan_cfg_ctx))) {
		tx_desc->timestamp = qdf_ktime_real_get();
		return true;
	}
	return false;
}
#endif
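
/*
 * Illustrative usage sketch (not part of this header): the enqueue path
 * stamps the descriptor only when at least one consumer of the timestamp
 * (delay stats, tracepoints, jitter stats) is active, keeping the common
 * case cheap.
 *
 *	if (dp_tx_desc_set_ktimestamp(vdev, tx_desc))
 *		dp_tx_info("tx desc %u timestamped", tx_desc->id);
 */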

#ifdef CONFIG_DP_PKT_ADD_TIMESTAMP
/**
 * dp_pkt_add_timestamp() - add timestamp in data payload
 * @vdev: dp vdev
 * @index: index to decide offset in payload
 * @time: timestamp to add in data payload
 * @nbuf: network buffer
 *
 * Return: None
 */
void dp_pkt_add_timestamp(struct dp_vdev *vdev,
			  enum qdf_pkt_timestamp_index index, uint64_t time,
			  qdf_nbuf_t nbuf);

/**
 * dp_pkt_get_timestamp() - get current system time
 * @time: return current system time
 *
 * Return: None
 */
void dp_pkt_get_timestamp(uint64_t *time);
#else
#define dp_pkt_add_timestamp(vdev, index, time, nbuf)

static inline
void dp_pkt_get_timestamp(uint64_t *time)
{
}
#endif
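
/*
 * Illustrative usage sketch (not part of this header): stamping a frame on
 * the transmit path. QDF_PKT_TX_DRIVER_ENTRY is one index value from
 * enum qdf_pkt_timestamp_index; the index used depends on the call site.
 *
 *	uint64_t ts;
 *
 *	dp_pkt_get_timestamp(&ts);
 *	dp_pkt_add_timestamp(vdev, QDF_PKT_TX_DRIVER_ENTRY, ts, nbuf);
 */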

#ifdef CONFIG_WLAN_SYSFS_MEM_STATS
/**
 * dp_update_tx_desc_stats() - Track the outstanding tx desc count watermark
 * on the pdev and report it to the sysfs mem-stats interface
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
	int32_t tx_descs_cnt =
		qdf_atomic_read(&pdev->num_tx_outstanding);
	if (pdev->tx_descs_max < tx_descs_cnt)
		pdev->tx_descs_max = tx_descs_cnt;
	qdf_mem_tx_desc_cnt_update(pdev->num_tx_outstanding,
				   pdev->tx_descs_max);
}

#else /* CONFIG_WLAN_SYSFS_MEM_STATS */

static inline void
dp_update_tx_desc_stats(struct dp_pdev *pdev)
{
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * dp_tx_get_global_desc_in_use() - read the number of global tx descriptors
 * currently in use
 * @dp_global: Datapath global context
 *
 * Return: global descriptors in use
 */
static inline int32_t
dp_tx_get_global_desc_in_use(struct dp_global_context *dp_global)
{
	return qdf_atomic_read(&dp_global->global_descriptor_in_use);
}
#endif

#ifdef QCA_TX_LIMIT_CHECK
static inline bool is_spl_packet(qdf_nbuf_t nbuf)
{
	if (qdf_nbuf_is_ipv4_eapol_pkt(nbuf))
		return true;
	return false;
}

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * dp_tx_limit_check() - Check if allocated tx descriptors reached
 * the global max reg limit and pdev max reg limit for regular packets,
 * and the special-packet limit for special packets
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached the max limit for
 * regular packets, or, for special packets, the max value configured
 * for the soc/pdev; else false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return false;
}

static inline bool
__dp_tx_limit_check(struct dp_soc *soc)
{
	return false;
}
#else
/**
 * is_dp_spl_tx_limit_reached() - Check whether a special packet has hit the
 * soc/pdev outstanding-descriptor limits; non-special packets are always
 * reported as having reached the limit
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if the limit is reached for this packet, else false
 */
static inline bool
is_dp_spl_tx_limit_reached(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (is_spl_packet(nbuf)) {
		if (qdf_atomic_read(&soc->num_tx_outstanding) >=
				soc->num_tx_allowed)
			return true;

		if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_tx_allowed)
			return true;

		return false;
	}

	return true;
}

static inline bool
__dp_tx_limit_check(struct dp_soc *soc)
{
	return (qdf_atomic_read(&soc->num_tx_outstanding) >=
					soc->num_reg_tx_allowed);
}

/**
 * dp_tx_limit_check() - Check if allocated tx descriptors reached
 * the soc max reg limit and pdev max reg limit for regular packets,
 * and the special-packet limit for special packets
 * @vdev: DP vdev handle
 * @nbuf: network buffer
 *
 * Return: true if allocated tx descriptors reached the max limit for
 * regular packets, or, for special packets, the max value configured
 * for the soc/pdev; else false
 */
static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;
	uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);

	if (__dp_tx_limit_check(soc)) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets exceed max tx limit, dropping the frame");
			DP_STATS_INC(vdev,
				     tx_i[xmit_type].dropped.desc_na.num, 1);
			return true;
		}
	}

	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
			pdev->num_reg_tx_allowed) {
		if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
			dp_tx_info("queued packets exceed max tx limit, dropping the frame");
			DP_STATS_INC(vdev,
				     tx_i[xmit_type].dropped.desc_na.num, 1);
			DP_STATS_INC(vdev,
				     tx_i[xmit_type].dropped.desc_na_exc_outstand.num,
				     1);
			return true;
		}
	}
	return false;
}
#endif
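
/*
 * Illustrative usage sketch (not part of this header): the descriptor
 * allocation path gates on dp_tx_limit_check() before reserving a
 * descriptor, so special frames such as EAPOL are only dropped once the
 * hard soc/pdev caps are reached.
 *
 *	if (dp_tx_limit_check(vdev, nbuf))
 *		return NULL;
 *
 *	dp_tx_outstanding_inc(pdev);
 */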

/**
 * dp_tx_exception_limit_check() - Check if allocated tx exception
 * descriptors reached the soc max limit
 * @vdev: DP vdev handle
 * @xmit_type: xmit type of packet - MLD/Link
 *
 * Return: true if allocated tx exception descriptors reached the max
 * configured value, else false
 */
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev, uint8_t xmit_type)
{
	struct dp_pdev *pdev = vdev->pdev;
	struct dp_soc *soc = pdev->soc;

	if (qdf_atomic_read(&soc->num_tx_exception) >=
			soc->num_msdu_exception_desc) {
		dp_info("exception packets exceed max allowed, dropping the packet");
		DP_STATS_INC(vdev, tx_i[xmit_type].dropped.exc_desc_na.num, 1);
		return true;
	}

	return false;
}
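
/*
 * Illustrative usage sketch (not part of this header): the exception path
 * (e.g. the dp_tx_send_exception() family) runs this check before consuming
 * one of the limited MSDU exception descriptors.
 *
 *	if (dp_tx_exception_limit_check(vdev, xmit_type))
 *		goto fail;
 */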

#ifdef QCA_SUPPORT_DP_GLOBAL_CTX
/**
 * dp_tx_outstanding_inc() - Increment outstanding tx desc values on global
 * and pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
}

static inline void
__dp_tx_outstanding_inc(struct dp_soc *soc)
{
}

static inline void
__dp_tx_outstanding_dec(struct dp_soc *soc)
{
}

/**
 * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on global
 * and pdev
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
}

/**
 * dp_tx_outstanding_sub() - Subtract outstanding tx desc values on pdev
 * @pdev: DP pdev handle
 * @count: count of descs to subtract from outstanding
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_sub(struct dp_pdev *pdev, uint32_t count)
{
}
#else

static inline void
__dp_tx_outstanding_inc(struct dp_soc *soc)
{
	qdf_atomic_inc(&soc->num_tx_outstanding);
}

/**
 * dp_tx_outstanding_inc() - Increment outstanding tx desc values on pdev
 * and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	__dp_tx_outstanding_inc(soc);
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

static inline void
__dp_tx_outstanding_dec(struct dp_soc *soc)
{
	qdf_atomic_dec(&soc->num_tx_outstanding);
}

/**
 * dp_tx_outstanding_dec() - Decrement outstanding tx desc values on pdev
 * and soc
 * @pdev: DP pdev handle
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	struct dp_soc *soc = pdev->soc;

	__dp_tx_outstanding_dec(soc);
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

/**
 * __dp_tx_outstanding_sub() - Subtract outstanding tx desc values from soc
 * @soc: DP soc handle
 * @count: count of descs to subtract from outstanding
 *
 * Return: void
 */
static inline void
__dp_tx_outstanding_sub(struct dp_soc *soc, uint32_t count)
{
	qdf_atomic_sub(count, &soc->num_tx_outstanding);
}

/**
 * dp_tx_outstanding_sub() - Subtract outstanding tx desc values on pdev
 * and soc
 * @pdev: DP pdev handle
 * @count: count of descs to subtract from outstanding
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_sub(struct dp_pdev *pdev, uint32_t count)
{
	struct dp_soc *soc = pdev->soc;

	__dp_tx_outstanding_sub(soc, count);
	qdf_atomic_sub(count, &pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
#endif /* QCA_SUPPORT_DP_GLOBAL_CTX */

#else /* QCA_TX_LIMIT_CHECK */
static inline bool
__dp_tx_limit_check(struct dp_soc *soc)
{
	return false;
}

static inline bool
dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
	return false;
}

static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev, uint8_t xmit_type)
{
	return false;
}

static inline void
__dp_tx_outstanding_inc(struct dp_soc *soc)
{
}

static inline void
dp_tx_outstanding_inc(struct dp_pdev *pdev)
{
	qdf_atomic_inc(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

static inline void
__dp_tx_outstanding_dec(struct dp_soc *soc)
{
}

static inline void
dp_tx_outstanding_dec(struct dp_pdev *pdev)
{
	qdf_atomic_dec(&pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}

static inline void
__dp_tx_outstanding_sub(struct dp_soc *soc, uint32_t count)
{
}

/**
 * dp_tx_outstanding_sub() - Subtract outstanding tx desc values on pdev
 * @pdev: DP pdev handle
 * @count: count of descs to subtract from outstanding
 *
 * Return: void
 */
static inline void
dp_tx_outstanding_sub(struct dp_pdev *pdev, uint32_t count)
{
	qdf_atomic_sub(count, &pdev->num_tx_outstanding);
	dp_update_tx_desc_stats(pdev);
}
#endif /* QCA_TX_LIMIT_CHECK */
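
/*
 * Illustrative usage sketch (not part of this header): the helpers above
 * are used in balanced pairs: one dp_tx_outstanding_inc() per descriptor
 * handed to hardware, one dp_tx_outstanding_dec() per completion, and
 * dp_tx_outstanding_sub() to batch the decrement when a whole list of
 * descriptors is flushed at once.
 *
 *	dp_tx_outstanding_inc(pdev);            on enqueue
 *	dp_tx_outstanding_dec(pdev);            on single completion
 *	dp_tx_outstanding_sub(pdev, num_descs); on bulk flush
 */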

/**
 * dp_tx_get_pkt_len() - Get the packet length of an msdu
 * @tx_desc: tx descriptor
 *
 * Return: Packet length of an msdu. If the packet is fragmented,
 * the length of the single fragment is returned.
 *
 * In TSO mode, an msdu from the stack is split into small fragments,
 * and each of these new fragments is transmitted as an individual msdu.
 *
 * Note that the length of an msdu from the stack may be smaller than
 * the total length of the fragments it has been split into, because
 * each fragment carries its own nbuf header.
 */
static inline uint32_t dp_tx_get_pkt_len(struct dp_tx_desc_s *tx_desc)
{
	return tx_desc->frm_type == dp_tx_frm_tso ?
		tx_desc->msdu_ext_desc->tso_desc->seg.total_len :
		tx_desc->length;
}
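
/*
 * Illustrative usage sketch (not part of this header): completion
 * processing can use this helper to charge the correct byte count per TSO
 * segment rather than the full stack msdu length. The stats field below is
 * an assumption, for illustration only.
 *
 *	DP_STATS_INC_PKT(vdev, tx_i[xmit_type].processed, 1,
 *			 dp_tx_get_pkt_len(tx_desc));
 */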

#ifdef FEATURE_RUNTIME_PM
static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
{
	return qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
		(hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
}
#else
static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
{
	return 0;
}
#endif
#if defined(WLAN_FEATURE_11BE_MLO) && defined(DP_MLO_LINK_STATS_SUPPORT)
/**
 * dp_tx_set_nbuf_band() - Set band info in nbuf cb
 * @nbuf: nbuf pointer
 * @txrx_peer: txrx_peer pointer
 * @link_id: Peer Link ID
 *
 * Return: None
 */
static inline void
dp_tx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
		    uint8_t link_id)
{
	qdf_nbuf_tx_set_band(nbuf, txrx_peer->band[link_id]);
}
#else
static inline void
dp_tx_set_nbuf_band(qdf_nbuf_t nbuf, struct dp_txrx_peer *txrx_peer,
		    uint8_t link_id)
{
}
#endif

#ifdef WLAN_FEATURE_TX_LATENCY_STATS
/**
 * dp_tx_latency_stats_fetch() - fetch transmit latency statistics for
 * a specified link mac address
 * @soc_hdl: Handle to struct dp_soc
 * @vdev_id: vdev id
 * @mac: link mac address of remote peer
 * @latency: buffer to hold per-link transmit latency statistics
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_latency_stats_fetch(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			  uint8_t *mac, struct cdp_tx_latency *latency);

/**
 * dp_tx_latency_stats_config() - configure transmit latency statistics for
 * a specified vdev
 * @soc_hdl: Handle to struct dp_soc
 * @vdev_id: vdev id
 * @cfg: configuration for transmit latency statistics
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_tx_latency_stats_config(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
			   struct cdp_tx_latency_config *cfg);

/**
 * dp_tx_latency_stats_register_cb() - register transmit latency statistics
 * callback
 * @handle: Handle to struct dp_soc
 * @cb: callback function for transmit latency statistics
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_tx_latency_stats_register_cb(struct cdp_soc_t *handle,
					   cdp_tx_latency_cb cb);
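
/*
 * Illustrative usage sketch (not part of this header): a control-path
 * client enables latency accounting on a vdev and later pulls the per-link
 * results for a peer.
 *
 *	struct cdp_tx_latency_config cfg = { 0 };
 *	struct cdp_tx_latency latency;
 *
 *	dp_tx_latency_stats_config(soc_hdl, vdev_id, &cfg);
 *	dp_tx_latency_stats_fetch(soc_hdl, vdev_id, peer_mac, &latency);
 */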
#endif /* WLAN_FEATURE_TX_LATENCY_STATS */
#endif /* __DP_TX_H */