xref: /wlan-driver/qca-wifi-host-cmn/hif/src/ce/ce_internal.h (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #ifndef __COPY_ENGINE_INTERNAL_H__
21*5113495bSYour Name #define __COPY_ENGINE_INTERNAL_H__
22*5113495bSYour Name 
23*5113495bSYour Name #include <hif.h>                /* A_TARGET_WRITE */
24*5113495bSYour Name 
25*5113495bSYour Name #ifndef QCA_WIFI_WCN6450
26*5113495bSYour Name /* Mask for packet offset in the CE descriptor (12-bit field, bits 16..27) */
27*5113495bSYour Name #define CE_DESC_PKT_OFFSET_BIT_M 0x0FFF0000
28*5113495bSYour Name 
29*5113495bSYour Name /* Packet offset start bit position in CE descriptor */
30*5113495bSYour Name #define CE_DESC_PKT_OFFSET_BIT_S 16
31*5113495bSYour Name 
32*5113495bSYour Name /* Packet type start bit position in CE descriptor */
33*5113495bSYour Name #define CE_DESC_PKT_TYPE_BIT_S 6
34*5113495bSYour Name 
35*5113495bSYour Name /* Tx classify start bit position in CE descriptor */
36*5113495bSYour Name #define CE_DESC_TX_CLASSIFY_BIT_S 5
37*5113495bSYour Name #else /* QCA_WIFI_WCN6450: same fields, shifted to different bit positions */
38*5113495bSYour Name /* Mask for packet offset in the CE descriptor (12-bit field, bits 19..30) */
39*5113495bSYour Name #define CE_DESC_PKT_OFFSET_BIT_M 0x7FF80000
40*5113495bSYour Name 
41*5113495bSYour Name /* Packet offset start bit position in CE descriptor */
42*5113495bSYour Name #define CE_DESC_PKT_OFFSET_BIT_S  19
43*5113495bSYour Name 
44*5113495bSYour Name /* Packet type start bit position in CE descriptor */
45*5113495bSYour Name #define CE_DESC_PKT_TYPE_BIT_S   9
46*5113495bSYour Name 
47*5113495bSYour Name /* Tx classify start bit position in CE descriptor */
48*5113495bSYour Name #define CE_DESC_TX_CLASSIFY_BIT_S   8
49*5113495bSYour Name #endif /* QCA_WIFI_WCN6450 */
50*5113495bSYour Name 
51*5113495bSYour Name /* Copy Engine operational state */
52*5113495bSYour Name enum CE_op_state {
53*5113495bSYour Name 	CE_UNUSED,	/* CE not (yet) initialized */
54*5113495bSYour Name 	CE_PAUSED,	/* CE initialized but processing is suspended */
55*5113495bSYour Name 	CE_RUNNING,	/* CE fully operational */
56*5113495bSYour Name 	CE_PENDING,	/* NOTE(review): transitional state - confirm exact use in ce_main */
57*5113495bSYour Name };
58*5113495bSYour Name 
59*5113495bSYour Name enum ol_ath_hif_ce_ecodes {
60*5113495bSYour Name 	CE_RING_DELTA_FAIL = 0	/* presumably: ring-delta (free space) check failed */
61*5113495bSYour Name };
62*5113495bSYour Name 
63*5113495bSYour Name struct CE_src_desc;	/* forward decl; per-target layouts are defined below */
64*5113495bSYour Name 
65*5113495bSYour Name /* CE ring BIT mask
66*5113495bSYour Name  * CE_RING_FLUSH_EVENT: flush ce ring index in case of link down
67*5113495bSYour Name  */
68*5113495bSYour Name #define CE_RING_FLUSH_EVENT BIT(0)
69*5113495bSYour Name 
70*5113495bSYour Name /* Copy Engine Ring internal state */
71*5113495bSYour Name struct CE_ring_state {
72*5113495bSYour Name 
73*5113495bSYour Name 	/* Number of entries in this ring; must be power of 2 */
74*5113495bSYour Name 	unsigned int nentries;
75*5113495bSYour Name 	unsigned int nentries_mask;	/* nentries - 1; used to wrap ring indices */
76*5113495bSYour Name 
77*5113495bSYour Name 	/*
78*5113495bSYour Name 	 * For dest ring, this is the next index to be processed
79*5113495bSYour Name 	 * by software after it was/is received into.
80*5113495bSYour Name 	 *
81*5113495bSYour Name 	 * For src ring, this is the last descriptor that was sent
82*5113495bSYour Name 	 * and completion processed by software.
83*5113495bSYour Name 	 *
84*5113495bSYour Name 	 * Regardless of src or dest ring, this is an invariant
85*5113495bSYour Name 	 * (modulo ring size):
86*5113495bSYour Name 	 *     write index >= read index >= sw_index
87*5113495bSYour Name 	 */
88*5113495bSYour Name 	unsigned int sw_index;
89*5113495bSYour Name 	unsigned int write_index;       /* cached copy */
90*5113495bSYour Name 	/*
91*5113495bSYour Name 	 * For src ring, this is the next index not yet processed by HW.
92*5113495bSYour Name 	 * This is a cached copy of the real HW index (read index), used
93*5113495bSYour Name 	 * for avoiding reading the HW index register more often than
94*5113495bSYour Name 	 * necessary.
95*5113495bSYour Name 	 * This extends the invariant:
96*5113495bSYour Name 	 *     write index >= read index >= hw_index >= sw_index
97*5113495bSYour Name 	 *
98*5113495bSYour Name 	 * For dest ring, this is currently unused.
99*5113495bSYour Name 	 */
100*5113495bSYour Name 	unsigned int hw_index;  /* cached copy */
101*5113495bSYour Name 
102*5113495bSYour Name 	/* Start of DMA-coherent area reserved for descriptors */
103*5113495bSYour Name 	void *base_addr_owner_space_unaligned;  /* Host address space */
104*5113495bSYour Name 	qdf_dma_addr_t base_addr_CE_space_unaligned; /* CE address space */
105*5113495bSYour Name 
106*5113495bSYour Name 	/*
107*5113495bSYour Name 	 * Actual start of descriptors.
108*5113495bSYour Name 	 * Aligned to descriptor-size boundary.
109*5113495bSYour Name 	 * Points into reserved DMA-coherent area, above.
110*5113495bSYour Name 	 */
111*5113495bSYour Name 	void *base_addr_owner_space;    /* Host address space */
112*5113495bSYour Name 	qdf_dma_addr_t base_addr_CE_space;   /* CE address space */
113*5113495bSYour Name 	/*
114*5113495bSYour Name 	 * Start of shadow copy of descriptors, within regular memory.
115*5113495bSYour Name 	 * Aligned to descriptor-size boundary.
116*5113495bSYour Name 	 */
117*5113495bSYour Name 	char *shadow_base_unaligned;
118*5113495bSYour Name 	struct CE_src_desc *shadow_base;
119*5113495bSYour Name 
120*5113495bSYour Name 	unsigned int low_water_mark_nentries;	/* watermark thresholds, in entries */
121*5113495bSYour Name 	unsigned int high_water_mark_nentries;
122*5113495bSYour Name 	void *srng_ctx;	/* opaque srng ring context (SRNG-based targets) */
123*5113495bSYour Name 	void **per_transfer_context;	/* caller context, one slot per ring entry */
124*5113495bSYour Name 
125*5113495bSYour Name 	/* HAL CE ring type */
126*5113495bSYour Name 	uint32_t hal_ring_type;
127*5113495bSYour Name 	/* ring memory prealloc */
128*5113495bSYour Name 	uint8_t is_ring_prealloc;
129*5113495bSYour Name 
130*5113495bSYour Name 	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
131*5113495bSYour Name 
132*5113495bSYour Name 	uint32_t flush_count;	/* number of ring flushes performed */
133*5113495bSYour Name 	/* CE ring event flags; see CE_RING_FLUSH_EVENT above */
134*5113495bSYour Name 	unsigned long event;
135*5113495bSYour Name 	/* last flushed time stamp */
136*5113495bSYour Name 	uint64_t last_flush_ts;
137*5113495bSYour Name };
138*5113495bSYour Name 
139*5113495bSYour Name #ifdef FEATURE_HIF_DELAYED_REG_WRITE
140*5113495bSYour Name /**
141*5113495bSYour Name  * struct ce_reg_write_stats - stats to keep track of register writes
142*5113495bSYour Name  * @enqueues: writes enqueued to delayed work
143*5113495bSYour Name  * @dequeues: writes dequeued from delayed work (not written yet)
144*5113495bSYour Name  * @coalesces: writes not enqueued since srng is already queued up
145*5113495bSYour Name  * @direct: writes not enqueued and written to register directly
146*5113495bSYour Name  * @dequeue_delay: number of times the dequeue operation was delayed
147*5113495bSYour Name  */
148*5113495bSYour Name struct ce_reg_write_stats {
149*5113495bSYour Name 	uint32_t enqueues;
150*5113495bSYour Name 	uint32_t dequeues;
151*5113495bSYour Name 	uint32_t coalesces;
152*5113495bSYour Name 	uint32_t direct;
153*5113495bSYour Name 	uint32_t dequeue_delay;
154*5113495bSYour Name };
155*5113495bSYour Name #endif /* FEATURE_HIF_DELAYED_REG_WRITE */
156*5113495bSYour Name 
157*5113495bSYour Name /* Copy Engine internal state */
158*5113495bSYour Name struct CE_state {
159*5113495bSYour Name 	struct hif_softc *scn;	/* back-pointer to the owning HIF instance */
160*5113495bSYour Name 	unsigned int id;	/* copy engine number */
161*5113495bSYour Name 	unsigned int attr_flags;  /* CE_ATTR_* */
162*5113495bSYour Name 	uint32_t ctrl_addr;       /* relative to BAR */
163*5113495bSYour Name 	enum CE_op_state state;	/* current operational state */
164*5113495bSYour Name 
165*5113495bSYour Name #ifdef WLAN_FEATURE_FASTPATH
166*5113495bSYour Name 	fastpath_msg_handler fastpath_handler;	/* fastpath message callback */
167*5113495bSYour Name 	void *context;	/* argument passed to fastpath_handler */
168*5113495bSYour Name #endif /* WLAN_FEATURE_FASTPATH */
169*5113495bSYour Name 	qdf_work_t oom_allocation_work;	/* deferred work; presumably retries buffer allocation after OOM */
170*5113495bSYour Name 
171*5113495bSYour Name 	ce_send_cb send_cb;	/* send-completion callback */
172*5113495bSYour Name 	void *send_context;	/* argument passed to send_cb */
173*5113495bSYour Name 
174*5113495bSYour Name 	CE_recv_cb recv_cb;	/* receive-completion callback */
175*5113495bSYour Name 	void *recv_context;	/* argument passed to recv_cb */
176*5113495bSYour Name 
177*5113495bSYour Name 	/* misc_cbs - are any callbacks besides send and recv enabled? */
178*5113495bSYour Name 	uint8_t misc_cbs;
179*5113495bSYour Name 
180*5113495bSYour Name 	CE_watermark_cb watermark_cb;	/* ring watermark callback */
181*5113495bSYour Name 	void *wm_context;	/* argument passed to watermark_cb */
182*5113495bSYour Name 
183*5113495bSYour Name #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
184*5113495bSYour Name 	qdf_atomic_t custom_cb_pending;	/* non-zero while custom_cb awaits execution */
185*5113495bSYour Name 	void (*custom_cb)(void *arg);	/* user-registered callback */
186*5113495bSYour Name 	void *custom_cb_context;	/* argument passed to custom_cb */
187*5113495bSYour Name #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
188*5113495bSYour Name 	/* Records the state of the copy-complete interrupt */
189*5113495bSYour Name 	int disable_copy_compl_intr;
190*5113495bSYour Name 
191*5113495bSYour Name 	unsigned int src_sz_max;	/* max source buffer size, in bytes */
192*5113495bSYour Name 	struct CE_ring_state *src_ring;	/* source ring (NULL if unused) */
193*5113495bSYour Name 	struct CE_ring_state *dest_ring;	/* destination ring (NULL if unused) */
194*5113495bSYour Name 	struct CE_ring_state *status_ring;	/* status ring (SRNG-based targets) */
195*5113495bSYour Name 	atomic_t rx_pending;	/* RX work outstanding flag */
196*5113495bSYour Name 
197*5113495bSYour Name 	qdf_spinlock_t ce_index_lock;	/* presumably serializes ring index updates */
198*5113495bSYour Name #ifdef CE_TASKLET_SCHEDULE_ON_FULL
199*5113495bSYour Name 	qdf_spinlock_t ce_interrupt_lock;
200*5113495bSYour Name #endif
201*5113495bSYour Name 	/* Flag to indicate whether to break out the DPC context */
202*5113495bSYour Name 	bool force_break;
203*5113495bSYour Name 
204*5113495bSYour Name 	/* time in nanoseconds to yield control of napi poll */
205*5113495bSYour Name 	unsigned long long ce_service_yield_time;
206*5113495bSYour Name 	/* CE service start time in nanoseconds */
207*5113495bSYour Name 	unsigned long long ce_service_start_time;
208*5113495bSYour Name 	/* Num Of Receive Buffers handled for one interrupt DPC routine */
209*5113495bSYour Name 	unsigned int receive_count;
210*5113495bSYour Name 	/* epping */
211*5113495bSYour Name 	bool timer_inited;
212*5113495bSYour Name 	qdf_timer_t poll_timer;
213*5113495bSYour Name 
214*5113495bSYour Name 	/* datapath - for faster access, use bools instead of a bitmap */
215*5113495bSYour Name 	bool htt_tx_data;
216*5113495bSYour Name 	bool htt_rx_data;
217*5113495bSYour Name 	qdf_lro_ctx_t lro_data;
218*5113495bSYour Name 
219*5113495bSYour Name 	void (*service)(struct hif_softc *scn, int CE_id);	/* per-CE service routine */
220*5113495bSYour Name #ifdef WLAN_TRACEPOINTS
221*5113495bSYour Name 	/* CE tasklet sched time in nanoseconds */
222*5113495bSYour Name 	unsigned long long ce_tasklet_sched_time;
223*5113495bSYour Name #endif
224*5113495bSYour Name 	bool msi_supported;	/* MSI interrupts available for this CE */
225*5113495bSYour Name 	bool batch_intr_supported;	/* batched interrupt mode available */
226*5113495bSYour Name #ifdef FEATURE_HIF_DELAYED_REG_WRITE
227*5113495bSYour Name 	struct ce_reg_write_stats wstats;	/* delayed register-write statistics */
228*5113495bSYour Name 	uint8_t reg_write_in_progress;	/* a delayed write is currently queued */
229*5113495bSYour Name 	qdf_time_t last_dequeue_time;	/* timestamp of last delayed-write dequeue */
230*5113495bSYour Name #endif
231*5113495bSYour Name 	uint32_t ce_wrt_idx_offset;	/* write-index register offset for this CE */
232*5113495bSYour Name };
233*5113495bSYour Name 
234*5113495bSYour Name /* Descriptor rings must be aligned to this boundary */
235*5113495bSYour Name #define CE_DESC_RING_ALIGN 8
236*5113495bSYour Name #define CLOCK_OVERRIDE 0x2	/* NOTE(review): magic value; semantics defined by target register map - confirm */
237*5113495bSYour Name 
238*5113495bSYour Name #ifdef QCA_WIFI_3_0 /* >32-bit addressing: combine buffer_addr with buffer_addr_hi bits */
239*5113495bSYour Name #define HIF_CE_DESC_ADDR_TO_DMA(desc) \
240*5113495bSYour Name 	(qdf_dma_addr_t)(((uint64_t)(desc)->buffer_addr + \
241*5113495bSYour Name 	((uint64_t)((desc)->buffer_addr_hi & CE_RING_BASE_ADDR_HIGH_MASK) << \
242*5113495bSYour Name 	 32)))
243*5113495bSYour Name #else /* 32-bit addressing: low word is the full DMA address */
244*5113495bSYour Name #define HIF_CE_DESC_ADDR_TO_DMA(desc) \
245*5113495bSYour Name 	(qdf_dma_addr_t)((desc)->buffer_addr)
246*5113495bSYour Name #endif
247*5113495bSYour Name 
248*5113495bSYour Name #if defined(QCA_WIFI_WCN6450)
249*5113495bSYour Name struct CE_src_desc {	/* CE source ring descriptor - WCN6450 layout (40-bit address) */
250*5113495bSYour Name 	uint32_t buffer_addr:32;	/* low 32 bits of the buffer DMA address */
251*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
252*5113495bSYour Name 	uint32_t gather:1,
253*5113495bSYour Name 		 packet_result_offset:12,
254*5113495bSYour Name 		 toeplitz_hash_enable:1, /* reserved */
255*5113495bSYour Name 		 addr_x_search_disable:1, /* reserved */
256*5113495bSYour Name 		 addr_y_search_disable:1, /* reserved */
257*5113495bSYour Name 		 misc_int_disable:1,
258*5113495bSYour Name 		 target_int_disable:1,
259*5113495bSYour Name 		 host_int_disable:1,
260*5113495bSYour Name 		 dest_byte_swap:1,
261*5113495bSYour Name 		 byte_swap:1,
262*5113495bSYour Name 		 type:2, /* reserved */
263*5113495bSYour Name 		 tx_classify:1,
264*5113495bSYour Name 		 buffer_addr_hi:8;	/* high 8 bits of the buffer DMA address */
265*5113495bSYour Name 	uint32_t meta_data:16,
266*5113495bSYour Name 		 nbytes:16;
267*5113495bSYour Name #else
268*5113495bSYour Name 	uint32_t buffer_addr_hi:8,	/* high 8 bits of the buffer DMA address */
269*5113495bSYour Name 		 tx_classify:1,
270*5113495bSYour Name 		 type:2, /* reserved */
271*5113495bSYour Name 		 byte_swap:1, /* src_byte_swap */
272*5113495bSYour Name 		 dest_byte_swap:1,
273*5113495bSYour Name 		 host_int_disable:1,
274*5113495bSYour Name 		 target_int_disable:1,
275*5113495bSYour Name 		 misc_int_disable:1,
276*5113495bSYour Name 		 addr_y_search_disable:1, /* reserved */
277*5113495bSYour Name 		 addr_x_search_disable:1, /* reserved */
278*5113495bSYour Name 		 toeplitz_hash_enable:1, /* reserved */
279*5113495bSYour Name 		 packet_result_offset:12,
280*5113495bSYour Name 		 gather:1;
281*5113495bSYour Name 	uint32_t nbytes:16,
282*5113495bSYour Name 		 meta_data:16;
283*5113495bSYour Name #endif
284*5113495bSYour Name 	uint32_t toeplitz_hash_result:32;
285*5113495bSYour Name };
286*5113495bSYour Name 
287*5113495bSYour Name struct CE_dest_desc {	/* CE destination ring descriptor - WCN6450 layout (40-bit address) */
288*5113495bSYour Name 	uint32_t buffer_addr:32;	/* low 32 bits of the buffer DMA address */
289*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
290*5113495bSYour Name 	uint32_t gather:1,
291*5113495bSYour Name 		 packet_result_offset:12,
292*5113495bSYour Name 		 toeplitz_hash_enable:1, /* reserved */
293*5113495bSYour Name 		 addr_x_search_disable:1, /* reserved */
294*5113495bSYour Name 		 addr_y_search_disable:1, /* reserved */
295*5113495bSYour Name 		 misc_int_disable:1,
296*5113495bSYour Name 		 target_int_disable:1,
297*5113495bSYour Name 		 host_int_disable:1,
298*5113495bSYour Name 		 byte_swap:1, /* dest_byte_swap */
299*5113495bSYour Name 		 src_byte_swap:1,
300*5113495bSYour Name 		 type:2, /* reserved */
301*5113495bSYour Name 		 tx_classify:1,
302*5113495bSYour Name 		 buffer_addr_hi:8;	/* high 8 bits of the buffer DMA address */
303*5113495bSYour Name 	uint32_t meta_data:16,
304*5113495bSYour Name 		 nbytes:16;
305*5113495bSYour Name #else
306*5113495bSYour Name 	uint32_t buffer_addr_hi:8,	/* high 8 bits of the buffer DMA address */
307*5113495bSYour Name 		 tx_classify:1,
308*5113495bSYour Name 		 type:2, /* reserved */
309*5113495bSYour Name 		 src_byte_swap:1,
310*5113495bSYour Name 		 byte_swap:1, /* dest_byte_swap */
311*5113495bSYour Name 		 host_int_disable:1,
312*5113495bSYour Name 		 target_int_disable:1,
313*5113495bSYour Name 		 misc_int_disable:1,
314*5113495bSYour Name 		 addr_y_search_disable:1, /* reserved */
315*5113495bSYour Name 		 addr_x_search_disable:1, /* reserved */
316*5113495bSYour Name 		 toeplitz_hash_enable:1, /* reserved */
317*5113495bSYour Name 		 packet_result_offset:12,
318*5113495bSYour Name 		 gather:1;
319*5113495bSYour Name 	uint32_t nbytes:16,
320*5113495bSYour Name 		 meta_data:16;
321*5113495bSYour Name #endif
322*5113495bSYour Name 	uint32_t toeplitz_hash_result:32;
323*5113495bSYour Name };
324*5113495bSYour Name #elif defined(QCA_WIFI_3_0)
325*5113495bSYour Name struct CE_src_desc {	/* CE source ring descriptor - QCA_WIFI_3_0 layout (37-bit address) */
326*5113495bSYour Name 	uint32_t buffer_addr:32;	/* low 32 bits of the buffer DMA address */
327*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
328*5113495bSYour Name 	uint32_t gather:1,
329*5113495bSYour Name 		enable_11h:1,
330*5113495bSYour Name 		meta_data_low:2, /* fw_metadata_low */
331*5113495bSYour Name 		packet_result_offset:12,
332*5113495bSYour Name 		toeplitz_hash_enable:1,
333*5113495bSYour Name 		addr_y_search_disable:1,
334*5113495bSYour Name 		addr_x_search_disable:1,
335*5113495bSYour Name 		misc_int_disable:1,
336*5113495bSYour Name 		target_int_disable:1,
337*5113495bSYour Name 		host_int_disable:1,
338*5113495bSYour Name 		dest_byte_swap:1,
339*5113495bSYour Name 		byte_swap:1,
340*5113495bSYour Name 		type:2,
341*5113495bSYour Name 		tx_classify:1,
342*5113495bSYour Name 		buffer_addr_hi:5;	/* high 5 bits of the buffer DMA address */
343*5113495bSYour Name 		uint32_t meta_data:16, /* fw_metadata_high */
344*5113495bSYour Name 		nbytes:16;       /* length in register map */
345*5113495bSYour Name #else
346*5113495bSYour Name 	uint32_t buffer_addr_hi:5,	/* high 5 bits of the buffer DMA address */
347*5113495bSYour Name 		tx_classify:1,
348*5113495bSYour Name 		type:2,
349*5113495bSYour Name 		byte_swap:1,          /* src_byte_swap */
350*5113495bSYour Name 		dest_byte_swap:1,
351*5113495bSYour Name 		host_int_disable:1,
352*5113495bSYour Name 		target_int_disable:1,
353*5113495bSYour Name 		misc_int_disable:1,
354*5113495bSYour Name 		addr_x_search_disable:1,
355*5113495bSYour Name 		addr_y_search_disable:1,
356*5113495bSYour Name 		toeplitz_hash_enable:1,
357*5113495bSYour Name 		packet_result_offset:12,
358*5113495bSYour Name 		meta_data_low:2, /* fw_metadata_low */
359*5113495bSYour Name 		enable_11h:1,
360*5113495bSYour Name 		gather:1;
361*5113495bSYour Name 		uint32_t nbytes:16, /* length in register map */
362*5113495bSYour Name 		meta_data:16; /* fw_metadata_high */
363*5113495bSYour Name #endif
364*5113495bSYour Name 	uint32_t toeplitz_hash_result:32;
365*5113495bSYour Name };
366*5113495bSYour Name 
367*5113495bSYour Name struct CE_dest_desc {	/* CE destination ring descriptor - QCA_WIFI_3_0 layout (37-bit address) */
368*5113495bSYour Name 	uint32_t buffer_addr:32;	/* low 32 bits of the buffer DMA address */
369*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
370*5113495bSYour Name 	uint32_t gather:1,
371*5113495bSYour Name 		enable_11h:1,
372*5113495bSYour Name 		meta_data_low:2, /* fw_metadata_low */
373*5113495bSYour Name 		packet_result_offset:12,
374*5113495bSYour Name 		toeplitz_hash_enable:1,
375*5113495bSYour Name 		addr_y_search_disable:1,
376*5113495bSYour Name 		addr_x_search_disable:1,
377*5113495bSYour Name 		misc_int_disable:1,
378*5113495bSYour Name 		target_int_disable:1,
379*5113495bSYour Name 		host_int_disable:1,
380*5113495bSYour Name 		byte_swap:1,
381*5113495bSYour Name 		src_byte_swap:1,
382*5113495bSYour Name 		type:2,
383*5113495bSYour Name 		tx_classify:1,
384*5113495bSYour Name 		buffer_addr_hi:5;	/* high 5 bits of the buffer DMA address */
385*5113495bSYour Name 		uint32_t meta_data:16, /* fw_metadata_high */
386*5113495bSYour Name 		nbytes:16;          /* length in register map */
387*5113495bSYour Name #else
388*5113495bSYour Name 	uint32_t buffer_addr_hi:5,	/* high 5 bits of the buffer DMA address */
389*5113495bSYour Name 		tx_classify:1,
390*5113495bSYour Name 		type:2,
391*5113495bSYour Name 		src_byte_swap:1,
392*5113495bSYour Name 		byte_swap:1,         /* dest_byte_swap */
393*5113495bSYour Name 		host_int_disable:1,
394*5113495bSYour Name 		target_int_disable:1,
395*5113495bSYour Name 		misc_int_disable:1,
396*5113495bSYour Name 		addr_x_search_disable:1,
397*5113495bSYour Name 		addr_y_search_disable:1,
398*5113495bSYour Name 		toeplitz_hash_enable:1,
399*5113495bSYour Name 		packet_result_offset:12,
400*5113495bSYour Name 		meta_data_low:2, /* fw_metadata_low */
401*5113495bSYour Name 		enable_11h:1,
402*5113495bSYour Name 		gather:1;
403*5113495bSYour Name 		uint32_t nbytes:16, /* length in register map */
404*5113495bSYour Name 		meta_data:16;    /* fw_metadata_high */
405*5113495bSYour Name #endif
406*5113495bSYour Name 	uint32_t toeplitz_hash_result:32;
407*5113495bSYour Name };
408*5113495bSYour Name #else
409*5113495bSYour Name struct CE_src_desc {	/* CE source ring descriptor - legacy layout (32-bit address) */
410*5113495bSYour Name 	uint32_t buffer_addr;	/* full 32-bit buffer DMA address */
411*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
412*5113495bSYour Name 	uint32_t  meta_data:12,
413*5113495bSYour Name 		  target_int_disable:1,
414*5113495bSYour Name 		  host_int_disable:1,
415*5113495bSYour Name 		  byte_swap:1,
416*5113495bSYour Name 		  gather:1,
417*5113495bSYour Name 		  nbytes:16;
418*5113495bSYour Name #else
419*5113495bSYour Name 
420*5113495bSYour Name 	uint32_t nbytes:16,
421*5113495bSYour Name 		 gather:1,
422*5113495bSYour Name 		 byte_swap:1,
423*5113495bSYour Name 		 host_int_disable:1,
424*5113495bSYour Name 		 target_int_disable:1,
425*5113495bSYour Name 		 meta_data:12;
426*5113495bSYour Name #endif
427*5113495bSYour Name };
428*5113495bSYour Name 
429*5113495bSYour Name struct CE_dest_desc {	/* CE destination ring descriptor - legacy layout (32-bit address) */
430*5113495bSYour Name 	uint32_t buffer_addr;	/* full 32-bit buffer DMA address */
431*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
432*5113495bSYour Name 	uint32_t  meta_data:12,
433*5113495bSYour Name 		  target_int_disable:1,
434*5113495bSYour Name 		  host_int_disable:1,
435*5113495bSYour Name 		  byte_swap:1,
436*5113495bSYour Name 		  gather:1,
437*5113495bSYour Name 		  nbytes:16;
438*5113495bSYour Name #else
439*5113495bSYour Name 	uint32_t nbytes:16,
440*5113495bSYour Name 		 gather:1,
441*5113495bSYour Name 		 byte_swap:1,
442*5113495bSYour Name 		 host_int_disable:1,
443*5113495bSYour Name 		 target_int_disable:1,
444*5113495bSYour Name 		 meta_data:12;
445*5113495bSYour Name #endif
446*5113495bSYour Name };
447*5113495bSYour Name #endif /* QCA_WIFI_WCN6450 */
448*5113495bSYour Name 
449*5113495bSYour Name struct ce_srng_src_desc {	/* SRNG-based CE source ring descriptor */
450*5113495bSYour Name 	uint32_t buffer_addr_lo;	/* low 32 bits of the buffer DMA address */
451*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
452*5113495bSYour Name 	uint32_t nbytes:16,
453*5113495bSYour Name 		 rsvd:4,
454*5113495bSYour Name 		 gather:1,
455*5113495bSYour Name 		 dest_swap:1,
456*5113495bSYour Name 		 byte_swap:1,
457*5113495bSYour Name 		 toeplitz_hash_enable:1,
458*5113495bSYour Name 		 buffer_addr_hi:8;	/* high 8 bits of the buffer DMA address */
459*5113495bSYour Name 	uint32_t rsvd1:16,
460*5113495bSYour Name 		 meta_data:16;
461*5113495bSYour Name 	uint32_t loop_count:4,
462*5113495bSYour Name 		 ring_id:8,
463*5113495bSYour Name 		 rsvd3:20;
464*5113495bSYour Name #else
465*5113495bSYour Name 	uint32_t buffer_addr_hi:8,	/* high 8 bits of the buffer DMA address */
466*5113495bSYour Name 		 toeplitz_hash_enable:1,
467*5113495bSYour Name 		 byte_swap:1,
468*5113495bSYour Name 		 dest_swap:1,
469*5113495bSYour Name 		 gather:1,
470*5113495bSYour Name 		 rsvd:4,
471*5113495bSYour Name 		 nbytes:16,
472*5113495bSYour Name 	uint32_t meta_data:16,
473*5113495bSYour Name 		 rsvd1:16;
474*5113495bSYour Name 	uint32_t rsvd3:20,
475*5113495bSYour Name 		 ring_id:8,
476*5113495bSYour Name 		 loop_count:4;
477*5113495bSYour Name #endif
478*5113495bSYour Name };
479*5113495bSYour Name struct ce_srng_dest_desc {	/* SRNG-based CE destination ring descriptor */
480*5113495bSYour Name 	uint32_t buffer_addr_lo;	/* low 32 bits of the buffer DMA address */
481*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
482*5113495bSYour Name 	uint32_t loop_count:4,
483*5113495bSYour Name 		 ring_id:8,
484*5113495bSYour Name 		 rsvd1:12,
485*5113495bSYour Name 		 buffer_addr_hi:8;	/* high 8 bits of the buffer DMA address */
486*5113495bSYour Name #else
487*5113495bSYour Name 	uint32_t buffer_addr_hi:8,	/* high 8 bits of the buffer DMA address */
488*5113495bSYour Name 		 rsvd1:12,
489*5113495bSYour Name 		 ring_id:8,
490*5113495bSYour Name 		 loop_count:4;
491*5113495bSYour Name #endif
492*5113495bSYour Name };
493*5113495bSYour Name struct ce_srng_dest_status_desc {	/* SRNG-based CE completion/status ring descriptor (no buffer address) */
494*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
495*5113495bSYour Name 	uint32_t nbytes:16,
496*5113495bSYour Name 		 rsvd:4,
497*5113495bSYour Name 		 gather:1,
498*5113495bSYour Name 		 dest_swap:1,
499*5113495bSYour Name 		 byte_swap:1,
500*5113495bSYour Name 		 toeplitz_hash_enable:1,
501*5113495bSYour Name 		 rsvd0:8;
502*5113495bSYour Name 	uint32_t rsvd1:16,
503*5113495bSYour Name 		 meta_data:16;
504*5113495bSYour Name #else
505*5113495bSYour Name 	uint32_t rsvd0:8,
506*5113495bSYour Name 		 toeplitz_hash_enable:1,
507*5113495bSYour Name 		 byte_swap:1,
508*5113495bSYour Name 		 dest_swap:1,
509*5113495bSYour Name 		 gather:1,
510*5113495bSYour Name 		 rsvd:4,
511*5113495bSYour Name 		 nbytes:16;
512*5113495bSYour Name 	uint32_t meta_data:16,
513*5113495bSYour Name 		 rsvd1:16;
514*5113495bSYour Name #endif
515*5113495bSYour Name 	uint32_t toeplitz_hash;
516*5113495bSYour Name #if _BYTE_ORDER == _BIG_ENDIAN
517*5113495bSYour Name 	uint32_t loop_count:4,
518*5113495bSYour Name 		 ring_id:8,
519*5113495bSYour Name 		 rsvd3:20;
520*5113495bSYour Name #else
521*5113495bSYour Name 	uint32_t rsvd3:20,
522*5113495bSYour Name 		 ring_id:8,
523*5113495bSYour Name 		 loop_count:4;
524*5113495bSYour Name #endif
525*5113495bSYour Name };
526*5113495bSYour Name 
527*5113495bSYour Name #define CE_SENDLIST_ITEMS_MAX 12	/* max items in one sendlist (see struct ce_sendlist_s below) */
528*5113495bSYour Name 
529*5113495bSYour Name /**
530*5113495bSYour Name  * union ce_desc - unified data type for ce descriptors
531*5113495bSYour Name  * @src_desc: source descriptor
532*5113495bSYour Name  * @dest_desc: destination descriptor
533*5113495bSYour Name  *
534*5113495bSYour Name  * Both src and destination descriptors follow the same format.
535*5113495bSYour Name  * They use different data structures for different access semantics.
536*5113495bSYour Name  * Here we provide a unifying data type.
537*5113495bSYour Name  */
538*5113495bSYour Name union ce_desc {	/* concrete layout is selected by the QCA_WIFI_* build flags above */
539*5113495bSYour Name 	struct CE_src_desc src_desc;
540*5113495bSYour Name 	struct CE_dest_desc dest_desc;
541*5113495bSYour Name };
542*5113495bSYour Name 
543*5113495bSYour Name /**
544*5113495bSYour Name  * union ce_srng_desc - unified data type for ce srng descriptors
545*5113495bSYour Name  * @src_desc: ce srng Source ring descriptor
546*5113495bSYour Name  * @dest_desc: ce srng destination ring descriptor
547*5113495bSYour Name  * @dest_status_desc: ce srng status ring descriptor
548*5113495bSYour Name  */
549*5113495bSYour Name union ce_srng_desc {	/* sized by its largest member (dest_status_desc) */
550*5113495bSYour Name 	struct ce_srng_src_desc src_desc;
551*5113495bSYour Name 	struct ce_srng_dest_desc dest_desc;
552*5113495bSYour Name 	struct ce_srng_dest_status_desc dest_status_desc;
553*5113495bSYour Name };
554*5113495bSYour Name 
555*5113495bSYour Name /**
556*5113495bSYour Name  * enum hif_ce_event_type - HIF copy engine event type
557*5113495bSYour Name  * @HIF_RX_DESC_POST: event recorded before updating write index of RX ring.
558*5113495bSYour Name  * @HIF_RX_DESC_COMPLETION: event recorded before updating sw index of RX ring.
559*5113495bSYour Name  * @HIF_TX_GATHER_DESC_POST: post gather desc. (no write index update)
560*5113495bSYour Name  * @HIF_TX_DESC_POST: event recorded before updating write index of TX ring.
561*5113495bSYour Name  * @HIF_TX_DESC_SOFTWARE_POST: event recorded when dropping a write to the write
562*5113495bSYour Name  *	index in a normal tx
563*5113495bSYour Name  * @HIF_TX_DESC_COMPLETION: event recorded before updating sw index of TX ring.
564*5113495bSYour Name  * @FAST_RX_WRITE_INDEX_UPDATE: event recorded before updating the write index
565*5113495bSYour Name  *	of the RX ring in fastpath
566*5113495bSYour Name  * @FAST_RX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
567*5113495bSYour Name  *	index of the RX ring in fastpath
568*5113495bSYour Name  * @FAST_TX_WRITE_INDEX_UPDATE: event recorded before updating the write index
569*5113495bSYour Name  *	of the TX ring in fastpath
570*5113495bSYour Name  * @FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE: recorded when dropping a write to
571*5113495bSYour Name  *	the write index in fastpath
572*5113495bSYour Name  * @FAST_TX_SOFTWARE_INDEX_UPDATE: event recorded before updating the software
573*5113495bSYour Name  *	index of the TX ring in fastpath
574*5113495bSYour Name  * @RESUME_WRITE_INDEX_UPDATE: event recorded when the write index is updated on bus resume
575*5113495bSYour Name  * @HIF_IRQ_EVENT: event recorded in the irq before scheduling the bh
576*5113495bSYour Name  * @HIF_CE_TASKLET_ENTRY: records the start of the ce_tasklet
577*5113495bSYour Name  * @HIF_CE_TASKLET_RESCHEDULE: records the rescheduling of the wlan_tasklet
578*5113495bSYour Name  * @HIF_CE_TASKLET_REAP_REPOLL: records the repoll of the wlan_tasklet
579*5113495bSYour Name  * @HIF_CE_TASKLET_EXIT: records the exit of the wlan tasklet without reschedule
580*5113495bSYour Name  * @HIF_CE_REAP_ENTRY: records when we process completion outside of a bh
581*5113495bSYour Name  * @HIF_CE_REAP_EXIT:  records when we finish processing completions outside of a bh
582*5113495bSYour Name  * @NAPI_SCHEDULE: records when napi is scheduled from the irq context
583*5113495bSYour Name  * @NAPI_POLL_ENTER: records the start of the napi poll function
584*5113495bSYour Name  * @NAPI_COMPLETE: records when interrupts are re-enabled
585*5113495bSYour Name  * @NAPI_POLL_EXIT: records when the napi poll function returns
586*5113495bSYour Name  * @HIF_RX_NBUF_ALLOC_FAILURE: record the packet when nbuf fails to allocate
587*5113495bSYour Name  * @HIF_RX_NBUF_MAP_FAILURE: record the packet when dma map fails
588*5113495bSYour Name  * @HIF_RX_NBUF_ENQUEUE_FAILURE: record the packet when enqueue to ce fails
589*5113495bSYour Name  * @HIF_CE_SRC_RING_BUFFER_POST: record the packet when buffer is posted to ce src ring
590*5113495bSYour Name  * @HIF_CE_DEST_RING_BUFFER_POST: record the packet when buffer is posted to ce dst ring
591*5113495bSYour Name  * @HIF_CE_DEST_RING_BUFFER_REAP: record the packet when buffer is reaped from ce dst ring
592*5113495bSYour Name  * @HIF_CE_DEST_STATUS_RING_REAP: record the packet when status ring is reaped
593*5113495bSYour Name  * @HIF_RX_DESC_PRE_NBUF_ALLOC: record the packet before nbuf allocation
594*5113495bSYour Name  * @HIF_RX_DESC_PRE_NBUF_MAP: record the packet before nbuf map
595*5113495bSYour Name  * @HIF_RX_DESC_POST_NBUF_MAP: record the packet after nbuf map
596*5113495bSYour Name  * @HIF_EVENT_TYPE_MAX: max event
597*5113495bSYour Name  */
598*5113495bSYour Name enum hif_ce_event_type {
599*5113495bSYour Name 	HIF_RX_DESC_POST,
600*5113495bSYour Name 	HIF_RX_DESC_COMPLETION,
601*5113495bSYour Name 	HIF_TX_GATHER_DESC_POST,
602*5113495bSYour Name 	HIF_TX_DESC_POST,
603*5113495bSYour Name 	HIF_TX_DESC_SOFTWARE_POST,
604*5113495bSYour Name 	HIF_TX_DESC_COMPLETION,
605*5113495bSYour Name 	FAST_RX_WRITE_INDEX_UPDATE,
606*5113495bSYour Name 	FAST_RX_SOFTWARE_INDEX_UPDATE,
607*5113495bSYour Name 	FAST_TX_WRITE_INDEX_UPDATE,
608*5113495bSYour Name 	FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE,
609*5113495bSYour Name 	FAST_TX_SOFTWARE_INDEX_UPDATE,
610*5113495bSYour Name 	RESUME_WRITE_INDEX_UPDATE,
611*5113495bSYour Name 
612*5113495bSYour Name 	HIF_IRQ_EVENT = 0x10,
613*5113495bSYour Name 	HIF_CE_TASKLET_ENTRY,
614*5113495bSYour Name 	HIF_CE_TASKLET_RESCHEDULE,
615*5113495bSYour Name 	HIF_CE_TASKLET_REAP_REPOLL,
616*5113495bSYour Name 	HIF_CE_TASKLET_EXIT,
617*5113495bSYour Name 	HIF_CE_REAP_ENTRY,
618*5113495bSYour Name 	HIF_CE_REAP_EXIT,
619*5113495bSYour Name 	NAPI_SCHEDULE,
620*5113495bSYour Name 	NAPI_POLL_ENTER,
621*5113495bSYour Name 	NAPI_COMPLETE,
622*5113495bSYour Name 	NAPI_POLL_EXIT,
623*5113495bSYour Name 
624*5113495bSYour Name 	HIF_RX_NBUF_ALLOC_FAILURE = 0x20,
625*5113495bSYour Name 	HIF_RX_NBUF_MAP_FAILURE,
626*5113495bSYour Name 	HIF_RX_NBUF_ENQUEUE_FAILURE,
627*5113495bSYour Name 
628*5113495bSYour Name 	HIF_CE_SRC_RING_BUFFER_POST,
629*5113495bSYour Name 	HIF_CE_DEST_RING_BUFFER_POST,
630*5113495bSYour Name 	HIF_CE_DEST_RING_BUFFER_REAP,
631*5113495bSYour Name 	HIF_CE_DEST_STATUS_RING_REAP,
632*5113495bSYour Name 
633*5113495bSYour Name 	HIF_RX_DESC_PRE_NBUF_ALLOC,
634*5113495bSYour Name 	HIF_RX_DESC_PRE_NBUF_MAP,
635*5113495bSYour Name 	HIF_RX_DESC_POST_NBUF_MAP,
636*5113495bSYour Name 
637*5113495bSYour Name 	HIF_EVENT_TYPE_MAX,
638*5113495bSYour Name };
639*5113495bSYour Name 
/**
 * ce_init_ce_desc_event_log() - initialize the descriptor event log of a CE
 * @scn: hif context
 * @ce_id: copy engine id the log belongs to
 * @size: number of entries the log should hold
 */
void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size);

/**
 * ce_deinit_ce_desc_event_log() - free the descriptor event log of a CE
 * @scn: hif context
 * @ce_id: copy engine id the log belongs to
 */
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id);

/**
 * hif_record_ce_desc_event() - record a legacy (non-srng) CE descriptor event
 * @scn: hif context
 * @ce_id: copy engine id
 * @type: event type being recorded
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address associated with the descriptor
 * @index: location of the descriptor in the ce ring
 * @len: length of the data
 */
void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
			      enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len);
646*5113495bSYour Name 
/**
 * enum ce_sendlist_type_e - type of a ce_sendlist item
 * @CE_SIMPLE_BUFFER_TYPE: item refers to a simple buffer (see ce_sendlist_s)
 */
enum ce_sendlist_type_e {
	CE_SIMPLE_BUFFER_TYPE,
	/* TBDXXX: CE_RX_DESC_LIST, */
};
651*5113495bSYour Name 
/*
 * There's a public "ce_sendlist" and a private "ce_sendlist_s".
 * The former is an opaque structure with sufficient space
 * to hold the latter.  The latter is the actual structure
 * definition and it is only used internally.  The opaque version
 * of the structure allows callers to allocate an instance on the
 * run-time stack without knowing any of the details of the
 * structure layout.
 */
struct ce_sendlist_s {
	/* number of valid entries in item[] */
	unsigned int num_items;
	struct ce_sendlist_item {
		/* what kind of data this item carries */
		enum ce_sendlist_type_e send_type;
		dma_addr_t data;        /* e.g. buffer or desc list */
		union {
			unsigned int nbytes;    /* simple buffer */
			unsigned int ndesc;     /* Rx descriptor list */
		} u;
		/* flags: externally-specified flags;
		 * OR-ed with internal flags
		 */
		uint32_t flags;
		uint32_t user_flags;
	} item[CE_SENDLIST_ITEMS_MAX];
};
677*5113495bSYour Name 
/**
 * hif_ce_service_should_yield() - check whether CE servicing should yield
 * @scn: hif context
 * @ce_state: context of the copy engine being serviced
 *
 * Return: true when the CE service loop should yield the CPU
 *         (exact yield policy is defined by the implementation)
 */
bool hif_ce_service_should_yield(struct hif_softc *scn, struct CE_state
				 *ce_state);
680*5113495bSYour Name 
#ifdef WLAN_FEATURE_FASTPATH
/* fastpath-only cleanup hooks for the H2T tx and T2H msg copy engines */
void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl);
void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl);
#else
/* no-op stubs when the fastpath feature is compiled out */
static inline void ce_h2t_tx_ce_cleanup(struct CE_handle *ce_hdl)
{
}

static inline void ce_t2h_msg_ce_cleanup(struct CE_handle *ce_hdl)
{
}
#endif
693*5113495bSYour Name 
/* which ring of a CE? */
#define CE_RING_SRC  0
#define CE_RING_DEST 1
#define CE_RING_STATUS 2

/* CDC WAR: magic cookie and the data CE it applies to — semantics defined
 * at the usage sites of these macros
 */
#define CDC_WAR_MAGIC_STR   0xceef0000
#define CDC_WAR_DATA_CE     4

/* Additional internal-only ce_send flags */
#define CE_SEND_FLAG_GATHER             0x00010000      /* Use Gather */
704*5113495bSYour Name 
/**
 * hif_get_wake_ce_id() - gets the copy engine id used for waking up
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine id should be populated
 *
 * Return: errno
 */
int hif_get_wake_ce_id(struct hif_softc *scn, uint8_t *ce_id);

/**
 * hif_get_fw_diag_ce_id() - gets the copy engine id used for FW diag
 * @scn: The hif context to use
 * @ce_id: a pointer where the copy engine id should be populated
 *
 * Return: errno
 */
int hif_get_fw_diag_ce_id(struct hif_softc *scn, uint8_t *ce_id);
722*5113495bSYour Name 
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)

/* history depth: larger when SLUB debugging is already paying for memory */
#ifndef HIF_CE_HISTORY_MAX
#if defined(CONFIG_SLUB_DEBUG_ON)
#define HIF_CE_HISTORY_MAX 1024
#else
#define HIF_CE_HISTORY_MAX 768
#endif /* CONFIG_SLUB_DEBUG_ON */
#endif /* !HIF_CE_HISTORY_MAX */

/* bytes of descriptor payload captured per event when data logging is on */
#define CE_DEBUG_MAX_DATA_BUF_SIZE 64

/**
 * struct hif_ce_desc_event - structure for detailing a ce event
 * @index: location of the descriptor in the ce ring;
 * @type: what the event was
 * @time: when it happened
 * @cpu_id: id of the CPU associated with the event
 * @current_hp: holds the current ring hp value
 * @current_tp: holds the current ring tp value
 * @descriptor: descriptor enqueued or dequeued
 * @memory: virtual address that was used
 * @dma_addr: physical/iova address based on smmu status
 * @dma_to_phy: physical address from iova address
 * @virt_to_phy: physical address from virtual address
 * @actual_data_len: length of the data
 * @data: data pointed by descriptor
 */
struct hif_ce_desc_event {
	int index;
	enum hif_ce_event_type type;
	uint64_t time;
	int cpu_id;
#ifdef HELIUMPLUS
	/* legacy CE descriptor format */
	union ce_desc descriptor;
#else
	/* srng-based targets also track head/tail pointers */
	uint32_t current_hp;
	uint32_t current_tp;
	union ce_srng_desc descriptor;
#endif
	void *memory;

#ifdef HIF_RECORD_PADDR
	/* iova/pa based on smmu status */
	qdf_dma_addr_t dma_addr;
	/* store pa from iova address */
	qdf_dma_addr_t dma_to_phy;
	/* store pa */
	qdf_dma_addr_t virt_to_phy;
#endif /* HIF_RECORD_PADDR */

#ifdef HIF_CE_DEBUG_DATA_BUF
	size_t actual_data_len;
	uint8_t *data;
#endif /* HIF_CE_DEBUG_DATA_BUF */
};
#else
/* opaque when event logging is compiled out */
struct hif_ce_desc_event;
#endif /*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
782*5113495bSYour Name 
/**
 * get_next_record_index() - get the next record index
 * @table_index: atomic index variable to increment
 * @array_size: array size of the circular buffer
 *
 * Increment the atomic index and reserve the value.
 * Takes care of buffer wrap.
 * Guaranteed to be thread safe as long as fewer than array_size contexts
 * try to access the array.  If there are more than array_size contexts
 * trying to access the array, full locking of the recording process would
 * be needed to have sane logging.
 *
 * Return: the index (within [0, array_size)) reserved for the caller's record
 */
int get_next_record_index(qdf_atomic_t *table_index, int array_size);
796*5113495bSYour Name 
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
/**
 * hif_record_ce_srng_desc_event() - Record data pointed by the CE descriptor
 * @scn: hif context
 * @ce_id: copy engine id
 * @type: event_type
 * @descriptor: ce src/dest/status ring descriptor
 * @memory: nbuf
 * @index: current sw/write index
 * @len: len of the buffer
 * @hal_ring: ce hw ring
 *
 * Return: None
 */
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring);

/**
 * hif_clear_ce_desc_debug_data() - Clear the contents of hif_ce_desc_event
 * upto data field before reusing it.
 *
 * @event: record every CE event
 *
 * Return: None
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event);
#else
/* no-op stubs when CE event logging is compiled out */
static inline
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
}

static inline
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
841*5113495bSYour Name 
#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_ce_desc_data_record() - Record data pointed by the CE descriptor
 * @event: structure detailing a ce event
 * @len: length of the data
 *
 * Return: None
 */
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len);

/* allocate/free the per-CE debug history data buffers */
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id);
#else
/* stubs when CE data-buffer debugging is compiled out */
static inline
QDF_STATUS alloc_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
void free_mem_ce_debug_hist_data(struct hif_softc *scn, uint32_t ce_id) { }

static inline
void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
{
}
#endif /*HIF_CE_DEBUG_DATA_BUF*/
868*5113495bSYour Name 
869*5113495bSYour Name #ifdef HIF_CONFIG_SLUB_DEBUG_ON
870*5113495bSYour Name /**
871*5113495bSYour Name  * ce_validate_nbytes() - validate nbytes for slub builds on tx descriptors
872*5113495bSYour Name  * @nbytes: nbytes value being written into a send descriptor
873*5113495bSYour Name  * @ce_state: context of the copy engine
874*5113495bSYour Name  *
875*5113495bSYour Name  * nbytes should be non-zero and less than max configured for the copy engine
876*5113495bSYour Name  *
877*5113495bSYour Name  * Return: none
878*5113495bSYour Name  */
ce_validate_nbytes(uint32_t nbytes,struct CE_state * ce_state)879*5113495bSYour Name static inline void ce_validate_nbytes(uint32_t nbytes,
880*5113495bSYour Name 				      struct CE_state *ce_state)
881*5113495bSYour Name {
882*5113495bSYour Name 	if (nbytes <= 0 || nbytes > ce_state->src_sz_max)
883*5113495bSYour Name 		QDF_BUG(0);
884*5113495bSYour Name }
885*5113495bSYour Name #else
ce_validate_nbytes(uint32_t nbytes,struct CE_state * ce_state)886*5113495bSYour Name static inline void ce_validate_nbytes(uint32_t nbytes,
887*5113495bSYour Name 				      struct CE_state *ce_state)
888*5113495bSYour Name {
889*5113495bSYour Name }
890*5113495bSYour Name #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
891*5113495bSYour Name 
#if defined(HIF_RECORD_PADDR)
/**
 * hif_ce_desc_record_rx_paddr() - record physical address for IOMMU
 * IOVA addr and MMU virtual addr for Rx
 * @scn: hif_softc
 * @event: event details
 * @nbuf: buffer posted to fw
 *
 * record physical address for ce_event_type HIF_RX_DESC_POST and
 * HIF_RX_DESC_COMPLETION
 *
 * Return: none
 */
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf);
#else
/* no-op when physical-address recording is compiled out */
static inline
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t nbuf)
{
}
#endif /* HIF_RECORD_PADDR */
916*5113495bSYour Name 
ce_ring_aquire_lock(struct CE_handle * handle)917*5113495bSYour Name static inline void ce_ring_aquire_lock(struct CE_handle *handle)
918*5113495bSYour Name {
919*5113495bSYour Name 	struct CE_state *ce_state = (struct CE_state *)handle;
920*5113495bSYour Name 
921*5113495bSYour Name 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
922*5113495bSYour Name }
923*5113495bSYour Name 
ce_ring_release_lock(struct CE_handle * handle)924*5113495bSYour Name static inline void ce_ring_release_lock(struct CE_handle *handle)
925*5113495bSYour Name {
926*5113495bSYour Name 	struct CE_state *ce_state = (struct CE_state *)handle;
927*5113495bSYour Name 
928*5113495bSYour Name 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
929*5113495bSYour Name }
930*5113495bSYour Name 
/**
 * ce_ring_clear_event() - Clear ring event
 * @ring: Ring pointer
 * @event: ring event type
 *
 * Return: None
 */
static inline void ce_ring_clear_event(struct CE_ring_state *ring, int event)
{
	qdf_atomic_clear_bit(event, &ring->event);
}
941*5113495bSYour Name 
/**
 * ce_ring_set_event() - Set ring event
 * @ring: Ring pointer
 * @event: Ring event type
 *
 * Return: None
 */
static inline void ce_ring_set_event(struct CE_ring_state *ring, int event)
{
	qdf_atomic_set_bit(event, &ring->event);
}
952*5113495bSYour Name 
/**
 * ce_ring_get_clear_event() - Clear ring event and return old value
 * @ring: Ring pointer
 * @event: Ring event type
 *
 * Return: the pre-clear state of the event bit (non-zero if it was set)
 */
static inline int ce_ring_get_clear_event(struct CE_ring_state *ring, int event)
{
	return qdf_atomic_test_and_clear_bit(event, &ring->event);
}
963*5113495bSYour Name 
ce_ring_inc_flush_cnt(struct CE_ring_state * ring)964*5113495bSYour Name static inline void ce_ring_inc_flush_cnt(struct CE_ring_state *ring)
965*5113495bSYour Name {
966*5113495bSYour Name 	ring->flush_count++;
967*5113495bSYour Name }
968*5113495bSYour Name #endif /* __COPY_ENGINE_INTERNAL_H__ */
969