xref: /wlan-driver/qca-wifi-host-cmn/hif/src/ce/ce_api.h (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #ifndef __COPY_ENGINE_API_H__
21*5113495bSYour Name #define __COPY_ENGINE_API_H__
22*5113495bSYour Name 
23*5113495bSYour Name #include "pld_common.h"
24*5113495bSYour Name #include "ce_main.h"
25*5113495bSYour Name #include "hif_main.h"
26*5113495bSYour Name 
27*5113495bSYour Name /* TBDXXX: Use int return values for consistency with Target */
28*5113495bSYour Name 
29*5113495bSYour Name /* TBDXXX: Perhaps merge Host/Target-->common */
30*5113495bSYour Name 
31*5113495bSYour Name /*
32*5113495bSYour Name  * Copy Engine support: low-level Target-side Copy Engine API.
33*5113495bSYour Name  * This is a hardware access layer used by code that understands
34*5113495bSYour Name  * how to use copy engines.
35*5113495bSYour Name  */
36*5113495bSYour Name 
37*5113495bSYour Name /*
38*5113495bSYour Name  * A "struct CE_handle *" serves as an opaque pointer-sized
39*5113495bSYour Name  * handle to a specific copy engine.
40*5113495bSYour Name  */
41*5113495bSYour Name struct CE_handle;
42*5113495bSYour Name 
43*5113495bSYour Name /*
44*5113495bSYour Name  * "Send Completion" callback type for Send Completion Notification.
45*5113495bSYour Name  *
46*5113495bSYour Name  * If a Send Completion callback is registered and one or more sends
47*5113495bSYour Name  * have completed, the callback is invoked.
48*5113495bSYour Name  *
49*5113495bSYour Name  * per_ce_send_context is a context supplied by the calling layer
50*5113495bSYour Name  * (via ce_send_cb_register). It is associated with a copy engine.
51*5113495bSYour Name  *
52*5113495bSYour Name  * per_transfer_send_context is context supplied by the calling layer
53*5113495bSYour Name  * (via the "send" call).  It may be different for each invocation
54*5113495bSYour Name  * of send.
55*5113495bSYour Name  *
56*5113495bSYour Name  * The buffer parameter is the first byte sent of the first buffer
57*5113495bSYour Name  * sent (if more than one buffer).
58*5113495bSYour Name  *
59*5113495bSYour Name  * nbytes is the number of bytes of that buffer that were sent.
60*5113495bSYour Name  *
61*5113495bSYour Name  * transfer_id matches the value used when the buffer or
62*5113495bSYour Name  * buf_list was sent.
63*5113495bSYour Name  *
64*5113495bSYour Name  * Implementation note: Pops 1 completed send buffer from Source ring
65*5113495bSYour Name  */
66*5113495bSYour Name typedef void (*ce_send_cb)(struct CE_handle *copyeng,
67*5113495bSYour Name 			   void *per_ce_send_context,
68*5113495bSYour Name 			   void *per_transfer_send_context,
69*5113495bSYour Name 			   qdf_dma_addr_t buffer,
70*5113495bSYour Name 			   unsigned int nbytes,
71*5113495bSYour Name 			   unsigned int transfer_id,
72*5113495bSYour Name 			   unsigned int sw_index,
73*5113495bSYour Name 			   unsigned int hw_index,
74*5113495bSYour Name 			   uint32_t toeplitz_hash_result);
75*5113495bSYour Name 
76*5113495bSYour Name /*
77*5113495bSYour Name  * "Buffer Received" callback type for Buffer Received Notification.
78*5113495bSYour Name  *
79*5113495bSYour Name  * Implementation note: Pops 1 completed recv buffer from Dest ring
80*5113495bSYour Name  */
81*5113495bSYour Name typedef void (*CE_recv_cb)(struct CE_handle *copyeng,
82*5113495bSYour Name 		   void *per_CE_recv_context,
83*5113495bSYour Name 		   void *per_transfer_recv_context,
84*5113495bSYour Name 		   qdf_dma_addr_t buffer,
85*5113495bSYour Name 		   unsigned int nbytes,
86*5113495bSYour Name 		   unsigned int transfer_id,
87*5113495bSYour Name 		   unsigned int flags);
88*5113495bSYour Name 
89*5113495bSYour Name /*
90*5113495bSYour Name  * Copy Engine Watermark callback type.
91*5113495bSYour Name  *
92*5113495bSYour Name  * Allows upper layers to be notified when watermarks are reached:
93*5113495bSYour Name  *   space is available and/or running short in a source ring
94*5113495bSYour Name  *   buffers are exhausted and/or abundant in a destination ring
95*5113495bSYour Name  *
96*5113495bSYour Name  * The flags parameter indicates which condition triggered this
97*5113495bSYour Name  * callback.  See CE_WM_FLAG_*.
98*5113495bSYour Name  *
99*5113495bSYour Name  * Watermark APIs are provided to allow upper layers "batch"
100*5113495bSYour Name  * descriptor processing and to allow upper layers to
101*5113495bSYour Name  * throttle/unthrottle.
102*5113495bSYour Name  */
103*5113495bSYour Name typedef void (*CE_watermark_cb)(struct CE_handle *copyeng,
104*5113495bSYour Name 				void *per_CE_wm_context, unsigned int flags);
105*5113495bSYour Name 
106*5113495bSYour Name 
107*5113495bSYour Name #define CE_WM_FLAG_SEND_HIGH   1
108*5113495bSYour Name #define CE_WM_FLAG_SEND_LOW    2
109*5113495bSYour Name #define CE_WM_FLAG_RECV_HIGH   4
110*5113495bSYour Name #define CE_WM_FLAG_RECV_LOW    8
111*5113495bSYour Name #define CE_HTT_TX_CE           4
112*5113495bSYour Name 
113*5113495bSYour Name 
114*5113495bSYour Name /**
115*5113495bSYour Name  * ce_service_srng_init() - Initialization routine for CE services
116*5113495bSYour Name  *                          in SRNG based targets
117*5113495bSYour Name  * Return : None
118*5113495bSYour Name  */
119*5113495bSYour Name void ce_service_srng_init(void);
120*5113495bSYour Name 
121*5113495bSYour Name /**
122*5113495bSYour Name  * ce_service_legacy_init() - Initialization routine for CE services
123*5113495bSYour Name  *                            in legacy targets
124*5113495bSYour Name  * Return : None
125*5113495bSYour Name  */
126*5113495bSYour Name void ce_service_legacy_init(void);
127*5113495bSYour Name 
128*5113495bSYour Name /* A list of buffers to be gathered and sent */
129*5113495bSYour Name struct ce_sendlist;
130*5113495bSYour Name 
131*5113495bSYour Name /* Copy Engine settable attributes */
132*5113495bSYour Name struct CE_attr;
133*5113495bSYour Name 
134*5113495bSYour Name /*==================Send=====================================================*/
135*5113495bSYour Name 
136*5113495bSYour Name /* ce_send flags */
137*5113495bSYour Name /* disable ring's byte swap, even if the default policy is to swap */
138*5113495bSYour Name #define CE_SEND_FLAG_SWAP_DISABLE        1
139*5113495bSYour Name 
140*5113495bSYour Name /*
141*5113495bSYour Name  * Queue a source buffer to be sent to an anonymous destination buffer.
142*5113495bSYour Name  *   copyeng         - which copy engine to use
143*5113495bSYour Name  *   buffer          - address of buffer
144*5113495bSYour Name  *   nbytes          - number of bytes to send
145*5113495bSYour Name  *   transfer_id     - arbitrary ID; reflected to destination
146*5113495bSYour Name  *   flags           - CE_SEND_FLAG_* values
147*5113495bSYour Name  * Returns QDF_STATUS.
148*5113495bSYour Name  *
149*5113495bSYour Name  * Note: If no flags are specified, use CE's default data swap mode.
150*5113495bSYour Name  *
151*5113495bSYour Name  * Implementation note: pushes 1 buffer to Source ring
152*5113495bSYour Name  */
153*5113495bSYour Name QDF_STATUS ce_send(struct CE_handle *copyeng,
154*5113495bSYour Name 		   void *per_transfer_send_context,
155*5113495bSYour Name 		   qdf_dma_addr_t buffer,
156*5113495bSYour Name 		   unsigned int nbytes,
157*5113495bSYour Name 		   unsigned int transfer_id,
158*5113495bSYour Name 		   unsigned int flags,
159*5113495bSYour Name 		   unsigned int user_flags);
160*5113495bSYour Name 
161*5113495bSYour Name #ifdef WLAN_FEATURE_FASTPATH
162*5113495bSYour Name int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
163*5113495bSYour Name 	unsigned int transfer_id, uint32_t download_len);
164*5113495bSYour Name 
165*5113495bSYour Name #endif
166*5113495bSYour Name 
/**
 * ce_enqueue_desc() - enqueue a descriptor to the CE ring.
 * @copyeng: which copy engine to use
 * @msdu: data buffer
 * @transfer_id: arbitrary ID; reflected to destination
 * @download_len: length of the packet download to FW.
 *
 * Return: status of the descriptor enqueue operation
 */
175*5113495bSYour Name int ce_enqueue_desc(struct CE_handle *copyeng, qdf_nbuf_t msdu,
176*5113495bSYour Name 		    unsigned int transfer_id, uint32_t download_len);
177*5113495bSYour Name 
178*5113495bSYour Name void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
179*5113495bSYour Name extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
180*5113495bSYour Name 		qdf_nbuf_t msdu,
181*5113495bSYour Name 		uint32_t transfer_id,
182*5113495bSYour Name 		uint32_t len,
183*5113495bSYour Name 		uint32_t sendhead);
184*5113495bSYour Name 
185*5113495bSYour Name QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl,
186*5113495bSYour Name 			  qdf_nbuf_t msdu,
187*5113495bSYour Name 			  uint32_t transfer_id,
188*5113495bSYour Name 			  uint32_t len);
189*5113495bSYour Name /*
190*5113495bSYour Name  * Register a Send Callback function.
191*5113495bSYour Name  * This function is called as soon as the contents of a Send
192*5113495bSYour Name  * have reached the destination, unless disable_interrupts is
193*5113495bSYour Name  * requested.  In this case, the callback is invoked when the
194*5113495bSYour Name  * send status is polled, shortly after the send completes.
195*5113495bSYour Name  */
196*5113495bSYour Name void ce_send_cb_register(struct CE_handle *copyeng,
197*5113495bSYour Name 			 ce_send_cb fn_ptr,
198*5113495bSYour Name 			 void *per_ce_send_context, int disable_interrupts);
199*5113495bSYour Name 
200*5113495bSYour Name /*
201*5113495bSYour Name  * Return the size of a SendList. This allows the caller to allocate
202*5113495bSYour Name  * a SendList while the SendList structure remains opaque.
203*5113495bSYour Name  */
204*5113495bSYour Name unsigned int ce_sendlist_sizeof(void);
205*5113495bSYour Name 
206*5113495bSYour Name /* Initialize a sendlist */
207*5113495bSYour Name void ce_sendlist_init(struct ce_sendlist *sendlist);
208*5113495bSYour Name 
209*5113495bSYour Name /**
210*5113495bSYour Name  * ce_sendlist_buf_add() - Append a simple buffer (address/length) to a sendlist
211*5113495bSYour Name  * @sendlist: Sendlist
212*5113495bSYour Name  * @buffer: buffer
213*5113495bSYour Name  * @nbytes: number of bytes to append
214*5113495bSYour Name  * @flags: flags
215*5113495bSYour Name  * @user_flags: user flags
216*5113495bSYour Name  *
217*5113495bSYour Name  * Return: QDF_STATUS
218*5113495bSYour Name  */
219*5113495bSYour Name QDF_STATUS ce_sendlist_buf_add(struct ce_sendlist *sendlist,
220*5113495bSYour Name 			       qdf_dma_addr_t buffer,
221*5113495bSYour Name 			       unsigned int nbytes,
222*5113495bSYour Name 			       /* OR-ed with internal flags */
223*5113495bSYour Name 			       uint32_t flags,
224*5113495bSYour Name 			       uint32_t user_flags);
225*5113495bSYour Name 
/**
 * ce_sendlist_send() - Queue a "sendlist" of buffers to be sent using gather to
 * a single anonymous destination buffer
 * @copyeng: which copy engine to use
 * @per_transfer_send_context: Per transfer send context
 * @sendlist: list of simple buffers to send using gather
 * @transfer_id: arbitrary ID; reflected to destination
 *
 * Implementation note: Pushes multiple buffers with Gather to Source ring.
 *
 * Return: QDF_STATUS
 */
238*5113495bSYour Name QDF_STATUS ce_sendlist_send(struct CE_handle *copyeng,
239*5113495bSYour Name 			    void *per_transfer_send_context,
240*5113495bSYour Name 			    struct ce_sendlist *sendlist,
241*5113495bSYour Name 			    unsigned int transfer_id);
242*5113495bSYour Name 
243*5113495bSYour Name /*==================Recv=====================================================*/
244*5113495bSYour Name 
245*5113495bSYour Name /**
246*5113495bSYour Name  * ce_recv_buf_enqueue() -  Make a buffer available to receive. The buffer must
247*5113495bSYour Name  * be at least of a minimal size appropriate for this copy engine (src_sz_max
248*5113495bSYour Name  * attribute).
249*5113495bSYour Name  * @copyeng: which copy engine to use
250*5113495bSYour Name  * @per_transfer_recv_context: context passed back to caller's recv_cb
251*5113495bSYour Name  * @buffer: address of buffer in CE space
252*5113495bSYour Name  *
253*5113495bSYour Name  * Implementation note: Pushes a buffer to Dest ring.
254*5113495bSYour Name  *
255*5113495bSYour Name  * Return: QDF_STATUS.
256*5113495bSYour Name  */
257*5113495bSYour Name QDF_STATUS ce_recv_buf_enqueue(struct CE_handle *copyeng,
258*5113495bSYour Name 			       void *per_transfer_recv_context,
259*5113495bSYour Name 			       qdf_dma_addr_t buffer);
260*5113495bSYour Name 
261*5113495bSYour Name /*
262*5113495bSYour Name  * Register a Receive Callback function.
263*5113495bSYour Name  * This function is called as soon as data is received
264*5113495bSYour Name  * from the source.
265*5113495bSYour Name  */
266*5113495bSYour Name void ce_recv_cb_register(struct CE_handle *copyeng,
267*5113495bSYour Name 			 CE_recv_cb fn_ptr,
268*5113495bSYour Name 			 void *per_CE_recv_context,
269*5113495bSYour Name 			 int disable_interrupts);
270*5113495bSYour Name 
271*5113495bSYour Name /*==================CE Watermark=============================================*/
272*5113495bSYour Name 
273*5113495bSYour Name /*
274*5113495bSYour Name  * Register a Watermark Callback function.
275*5113495bSYour Name  * This function is called as soon as a watermark level
276*5113495bSYour Name  * is crossed.  A Watermark Callback function is free to
277*5113495bSYour Name  * handle received data "en masse"; but then some coordination
278*5113495bSYour Name  * is required with a registered Receive Callback function.
279*5113495bSYour Name  * [Suggestion: Either handle Receives in a Receive Callback
280*5113495bSYour Name  * or en masse in a Watermark Callback; but not both.]
281*5113495bSYour Name  */
282*5113495bSYour Name void ce_watermark_cb_register(struct CE_handle *copyeng,
283*5113495bSYour Name 			  CE_watermark_cb fn_ptr,
284*5113495bSYour Name 			  void *per_CE_wm_context);
285*5113495bSYour Name 
286*5113495bSYour Name /*
287*5113495bSYour Name  * Set low/high watermarks for the send/source side of a copy engine.
288*5113495bSYour Name  *
289*5113495bSYour Name  * Typically, the destination side CPU manages watermarks for
290*5113495bSYour Name  * the receive side and the source side CPU manages watermarks
291*5113495bSYour Name  * for the send side.
292*5113495bSYour Name  *
293*5113495bSYour Name  * A low watermark of 0 is never hit (so the watermark function
294*5113495bSYour Name  * will never be called for a Low Watermark condition).
295*5113495bSYour Name  *
296*5113495bSYour Name  * A high watermark equal to nentries is never hit (so the
297*5113495bSYour Name  * watermark function will never be called for a High Watermark
298*5113495bSYour Name  * condition).
299*5113495bSYour Name  */
300*5113495bSYour Name void ce_send_watermarks_set(struct CE_handle *copyeng,
301*5113495bSYour Name 			    unsigned int low_alert_nentries,
302*5113495bSYour Name 			    unsigned int high_alert_nentries);
303*5113495bSYour Name 
304*5113495bSYour Name /* Set low/high watermarks for the receive/destination side of copy engine. */
305*5113495bSYour Name void ce_recv_watermarks_set(struct CE_handle *copyeng,
306*5113495bSYour Name 			    unsigned int low_alert_nentries,
307*5113495bSYour Name 			    unsigned int high_alert_nentries);
308*5113495bSYour Name 
309*5113495bSYour Name /*
310*5113495bSYour Name  * Return the number of entries that can be queued
311*5113495bSYour Name  * to a ring at an instant in time.
312*5113495bSYour Name  *
313*5113495bSYour Name  * For source ring, does not imply that destination-side
314*5113495bSYour Name  * buffers are available; merely indicates descriptor space
315*5113495bSYour Name  * in the source ring.
316*5113495bSYour Name  *
317*5113495bSYour Name  * For destination ring, does not imply that previously
318*5113495bSYour Name  * received buffers have been processed; merely indicates
319*5113495bSYour Name  * descriptor space in destination ring.
320*5113495bSYour Name  *
321*5113495bSYour Name  * Mainly for use with CE Watermark callback.
322*5113495bSYour Name  */
323*5113495bSYour Name unsigned int ce_send_entries_avail(struct CE_handle *copyeng);
324*5113495bSYour Name unsigned int ce_recv_entries_avail(struct CE_handle *copyeng);
325*5113495bSYour Name 
326*5113495bSYour Name /* recv flags */
327*5113495bSYour Name /* Data is byte-swapped */
328*5113495bSYour Name #define CE_RECV_FLAG_SWAPPED            1
329*5113495bSYour Name 
330*5113495bSYour Name /**
331*5113495bSYour Name  * ce_completed_recv_next() - Supply data for the next completed unprocessed
332*5113495bSYour Name  * receive descriptor.
333*5113495bSYour Name  * @copyeng: which copy engine to use
334*5113495bSYour Name  * @per_CE_contextp: CE context
335*5113495bSYour Name  * @per_transfer_contextp: Transfer context
336*5113495bSYour Name  * @bufferp: buffer pointer
337*5113495bSYour Name  * @nbytesp: number of bytes
338*5113495bSYour Name  * @transfer_idp: Transfer idp
339*5113495bSYour Name  * @flagsp: flags
340*5113495bSYour Name  *
341*5113495bSYour Name  * For use
342*5113495bSYour Name  *    with CE Watermark callback,
343*5113495bSYour Name  *    in a recv_cb function when processing buf_lists
344*5113495bSYour Name  *    in a recv_cb function in order to mitigate recv_cb's.
345*5113495bSYour Name  *
346*5113495bSYour Name  * Implementation note: Pops buffer from Dest ring.
347*5113495bSYour Name  *
348*5113495bSYour Name  * Return: QDF_STATUS
349*5113495bSYour Name  */
350*5113495bSYour Name QDF_STATUS ce_completed_recv_next(struct CE_handle *copyeng,
351*5113495bSYour Name 				  void **per_CE_contextp,
352*5113495bSYour Name 				  void **per_transfer_contextp,
353*5113495bSYour Name 				  qdf_dma_addr_t *bufferp,
354*5113495bSYour Name 				  unsigned int *nbytesp,
355*5113495bSYour Name 				  unsigned int *transfer_idp,
356*5113495bSYour Name 				  unsigned int *flagsp);
357*5113495bSYour Name 
358*5113495bSYour Name /**
359*5113495bSYour Name  * ce_completed_send_next() - Supply data for the next completed unprocessed
360*5113495bSYour Name  * send descriptor.
361*5113495bSYour Name  * @copyeng: which copy engine to use
362*5113495bSYour Name  * @per_CE_contextp: CE context
363*5113495bSYour Name  * @per_transfer_contextp: Transfer context
364*5113495bSYour Name  * @bufferp: buffer pointer
365*5113495bSYour Name  * @nbytesp: number of bytes
366*5113495bSYour Name  * @transfer_idp: Transfer idp
367*5113495bSYour Name  * @sw_idx: SW index
368*5113495bSYour Name  * @hw_idx: HW index
369*5113495bSYour Name  * @toeplitz_hash_result: toeplitz hash result
370*5113495bSYour Name  *
371*5113495bSYour Name  * For use
372*5113495bSYour Name  *    with CE Watermark callback
373*5113495bSYour Name  *    in a send_cb function in order to mitigate send_cb's.
374*5113495bSYour Name  *
375*5113495bSYour Name  * Implementation note: Pops 1 completed send buffer from Source ring
376*5113495bSYour Name  *
377*5113495bSYour Name  * Return: QDF_STATUS
378*5113495bSYour Name  */
379*5113495bSYour Name QDF_STATUS ce_completed_send_next(struct CE_handle *copyeng,
380*5113495bSYour Name 				  void **per_CE_contextp,
381*5113495bSYour Name 				  void **per_transfer_contextp,
382*5113495bSYour Name 				  qdf_dma_addr_t *bufferp,
383*5113495bSYour Name 				  unsigned int *nbytesp,
384*5113495bSYour Name 				  unsigned int *transfer_idp,
385*5113495bSYour Name 				  unsigned int *sw_idx,
386*5113495bSYour Name 				  unsigned int *hw_idx,
387*5113495bSYour Name 				  uint32_t *toeplitz_hash_result);
388*5113495bSYour Name 
389*5113495bSYour Name #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
390*5113495bSYour Name /*==================CE custom callbacks=================================*/
391*5113495bSYour Name 
392*5113495bSYour Name /**
393*5113495bSYour Name  * ce_register_custom_cb() - Helper API to register the custom callback
394*5113495bSYour Name  * @copyeng: Pointer to CE handle
395*5113495bSYour Name  * @custom_cb: Custom call back function pointer
396*5113495bSYour Name  * @custom_cb_context: Custom callback context
397*5113495bSYour Name  *
398*5113495bSYour Name  * return: void
399*5113495bSYour Name  */
400*5113495bSYour Name void
401*5113495bSYour Name ce_register_custom_cb(struct CE_handle *copyeng, void (*custom_cb)(void *),
402*5113495bSYour Name 		      void *custom_cb_context);
403*5113495bSYour Name 
404*5113495bSYour Name /**
405*5113495bSYour Name  * ce_unregister_custom_cb() - Helper API to unregister the custom callback
406*5113495bSYour Name  * @copyeng: Pointer to CE handle
407*5113495bSYour Name  *
408*5113495bSYour Name  * return: void
409*5113495bSYour Name  */
410*5113495bSYour Name void
411*5113495bSYour Name ce_unregister_custom_cb(struct CE_handle *copyeng);
412*5113495bSYour Name 
413*5113495bSYour Name /**
414*5113495bSYour Name  * ce_enable_custom_cb() - Helper API to enable the custom callback
415*5113495bSYour Name  * @copyeng: Pointer to CE handle
416*5113495bSYour Name  *
417*5113495bSYour Name  * return: void
418*5113495bSYour Name  */
419*5113495bSYour Name void
420*5113495bSYour Name ce_enable_custom_cb(struct CE_handle *copyeng);
421*5113495bSYour Name 
422*5113495bSYour Name /**
423*5113495bSYour Name  * ce_disable_custom_cb() - Helper API to disable the custom callback
424*5113495bSYour Name  * @copyeng: Pointer to CE handle
425*5113495bSYour Name  *
426*5113495bSYour Name  * return: void
427*5113495bSYour Name  */
428*5113495bSYour Name void
429*5113495bSYour Name ce_disable_custom_cb(struct CE_handle *copyeng);
430*5113495bSYour Name #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
431*5113495bSYour Name 
432*5113495bSYour Name /*==================CE Engine Initialization=================================*/
433*5113495bSYour Name 
434*5113495bSYour Name /* Initialize an instance of a CE */
435*5113495bSYour Name struct CE_handle *ce_init(struct hif_softc *scn,
436*5113495bSYour Name 			  unsigned int CE_id, struct CE_attr *attr);
437*5113495bSYour Name 
/**
 * hif_ce_desc_history_log_register() - Register hif_ce_desc_history buffers
 * to SSR driver dump.
 * @scn: HIF context
 *
 * Return: None
 */
445*5113495bSYour Name void hif_ce_desc_history_log_register(struct hif_softc *scn);
446*5113495bSYour Name 
447*5113495bSYour Name /*==================CE Engine Shutdown=======================================*/
448*5113495bSYour Name /*
449*5113495bSYour Name  * Support clean shutdown by allowing the caller to revoke
450*5113495bSYour Name  * receive buffers.  Target DMA must be stopped before using
451*5113495bSYour Name  * this API.
452*5113495bSYour Name  */
453*5113495bSYour Name QDF_STATUS
454*5113495bSYour Name ce_revoke_recv_next(struct CE_handle *copyeng,
455*5113495bSYour Name 		    void **per_CE_contextp,
456*5113495bSYour Name 		    void **per_transfer_contextp,
457*5113495bSYour Name 		    qdf_dma_addr_t *bufferp);
458*5113495bSYour Name 
459*5113495bSYour Name /*
460*5113495bSYour Name  * Support clean shutdown by allowing the caller to cancel
461*5113495bSYour Name  * pending sends.  Target DMA must be stopped before using
462*5113495bSYour Name  * this API.
463*5113495bSYour Name  */
464*5113495bSYour Name QDF_STATUS
465*5113495bSYour Name ce_cancel_send_next(struct CE_handle *copyeng,
466*5113495bSYour Name 		    void **per_CE_contextp,
467*5113495bSYour Name 		    void **per_transfer_contextp,
468*5113495bSYour Name 		    qdf_dma_addr_t *bufferp,
469*5113495bSYour Name 		    unsigned int *nbytesp,
470*5113495bSYour Name 		    unsigned int *transfer_idp,
471*5113495bSYour Name 		    uint32_t *toeplitz_hash_result);
472*5113495bSYour Name 
473*5113495bSYour Name void ce_fini(struct CE_handle *copyeng);
474*5113495bSYour Name 
/**
 * hif_ce_desc_history_log_unregister() - unregister hif_ce_desc_history
 * buffers from SSR driver dump.
 *
 * Return: None
 */
481*5113495bSYour Name void hif_ce_desc_history_log_unregister(void);
482*5113495bSYour Name 
483*5113495bSYour Name /*==================CE Interrupt Handlers====================================*/
484*5113495bSYour Name void ce_per_engine_service_any(int irq, struct hif_softc *scn);
485*5113495bSYour Name int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id);
486*5113495bSYour Name void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int CE_id);
487*5113495bSYour Name 
488*5113495bSYour Name /*===================CE cmpl interrupt Enable/Disable =======================*/
489*5113495bSYour Name void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn);
490*5113495bSYour Name void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn);
491*5113495bSYour Name 
492*5113495bSYour Name /* API to check if any of the copy engine pipes has
493*5113495bSYour Name  * pending frames for processing
494*5113495bSYour Name  */
495*5113495bSYour Name bool ce_get_rx_pending(struct hif_softc *scn);
496*5113495bSYour Name 
497*5113495bSYour Name /**
498*5113495bSYour Name  * war_ce_src_ring_write_idx_set() - Set write index for CE source ring
499*5113495bSYour Name  * @scn: HIF context
500*5113495bSYour Name  * @ctrl_addr: address
501*5113495bSYour Name  * @write_index: write index
502*5113495bSYour Name  *
503*5113495bSYour Name  * Return: None
504*5113495bSYour Name  */
505*5113495bSYour Name void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
506*5113495bSYour Name 				   u32 ctrl_addr, unsigned int write_index);
507*5113495bSYour Name 
508*5113495bSYour Name /* CE_attr.flags values */
509*5113495bSYour Name #define CE_ATTR_NO_SNOOP             0x01 /* Use NonSnooping PCIe accesses? */
510*5113495bSYour Name #define CE_ATTR_BYTE_SWAP_DATA       0x02 /* Byte swap data words */
511*5113495bSYour Name #define CE_ATTR_SWIZZLE_DESCRIPTORS  0x04 /* Swizzle descriptors? */
512*5113495bSYour Name #define CE_ATTR_DISABLE_INTR         0x08 /* no interrupt on copy completion */
513*5113495bSYour Name #define CE_ATTR_ENABLE_POLL          0x10 /* poll for residue descriptors */
514*5113495bSYour Name #define CE_ATTR_DIAG                 0x20 /* Diag CE */
515*5113495bSYour Name #define CE_ATTR_INIT_ON_DEMAND       0x40 /* Initialized on demand */
516*5113495bSYour Name #define CE_ATTR_HI_TASKLET           0x80 /* HI_TASKLET CE */
517*5113495bSYour Name 
/**
 * struct CE_attr - Attributes of an instance of a Copy Engine
 * @flags:         CE_ATTR_* values
 * @priority:      TBD
 * @src_nentries:  #entries in source ring - Must be a power of 2
 * @src_sz_max:    Max source send size for this CE. This is also the minimum
 *                 size of a destination buffer
 * @dest_nentries: #entries in destination ring - Must be a power of 2
 * @reserved:      Future Use
 *
 * Passed to ce_init() to configure a copy engine instance.
 */
struct CE_attr {
	unsigned int flags;
	unsigned int priority;
	unsigned int src_nentries;
	unsigned int src_sz_max;
	unsigned int dest_nentries;
	void *reserved;
};
536*5113495bSYour Name 
537*5113495bSYour Name /*
538*5113495bSYour Name  * When using sendlist_send to transfer multiple buffer fragments, the
539*5113495bSYour Name  * transfer context of each fragment, except last one, will be filled
540*5113495bSYour Name  * with CE_SENDLIST_ITEM_CTXT. CE_completed_send will return success for
541*5113495bSYour Name  * each fragment done with send and the transfer context would be
542*5113495bSYour Name  * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
543*5113495bSYour Name  * status of a send completion.
544*5113495bSYour Name  */
545*5113495bSYour Name #define CE_SENDLIST_ITEM_CTXT   ((void *)0xcecebeef)
546*5113495bSYour Name 
/*
 * This is an opaque type that is at least large enough to hold
 * a sendlist. A sendlist can only be accessed through CE APIs,
 * but this allows a sendlist to be allocated on the run-time
 * stack.  TBDXXX: un-opaque would be simpler...
 *
 * Size with ce_sendlist_sizeof() and initialize with ce_sendlist_init()
 * before use.
 */
struct ce_sendlist {
	/* Opaque storage; sized so the real (private) sendlist fits inside */
	unsigned int word[62];
};
556*5113495bSYour Name 
557*5113495bSYour Name #define ATH_ISR_NOSCHED  0x0000  /* Do not schedule bottom half/DPC */
558*5113495bSYour Name #define ATH_ISR_SCHED    0x0001  /* Schedule the bottom half for execution */
559*5113495bSYour Name #define ATH_ISR_NOTMINE  0x0002  /* for shared IRQ's */
560*5113495bSYour Name 
561*5113495bSYour Name #ifdef IPA_OFFLOAD
562*5113495bSYour Name void ce_ipa_get_resource(struct CE_handle *ce,
563*5113495bSYour Name 			 qdf_shared_mem_t **ce_sr,
564*5113495bSYour Name 			 uint32_t *ce_sr_ring_size,
565*5113495bSYour Name 			 qdf_dma_addr_t *ce_reg_paddr);
566*5113495bSYour Name #else
567*5113495bSYour Name /**
568*5113495bSYour Name  * ce_ipa_get_resource() - get uc resource on copyengine
569*5113495bSYour Name  * @ce: copyengine context
570*5113495bSYour Name  * @ce_sr: copyengine source ring resource info
571*5113495bSYour Name  * @ce_sr_ring_size: copyengine source ring size
572*5113495bSYour Name  * @ce_reg_paddr: copyengine register physical address
573*5113495bSYour Name  *
574*5113495bSYour Name  * Copy engine should release resource to micro controller
575*5113495bSYour Name  * Micro controller needs
576*5113495bSYour Name  *  - Copy engine source descriptor base address
577*5113495bSYour Name  *  - Copy engine source descriptor size
578*5113495bSYour Name  *  - PCI BAR address to access copy engine register
579*5113495bSYour Name  *
580*5113495bSYour Name  * Return: None
581*5113495bSYour Name  */
static inline void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
	/*
	 * No-op stub for builds without IPA_OFFLOAD: there is no micro
	 * controller resource to report, so the output parameters are
	 * deliberately left untouched.
	 */
}
588*5113495bSYour Name #endif /* IPA_OFFLOAD */
589*5113495bSYour Name 
ce_pkt_error_count_incr(struct HIF_CE_state * _hif_state,enum ol_ath_hif_pkt_ecodes _hif_ecode)590*5113495bSYour Name static inline void ce_pkt_error_count_incr(
591*5113495bSYour Name 	struct HIF_CE_state *_hif_state,
592*5113495bSYour Name 	enum ol_ath_hif_pkt_ecodes _hif_ecode)
593*5113495bSYour Name {
594*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(_hif_state);
595*5113495bSYour Name 
596*5113495bSYour Name 	if (_hif_ecode == HIF_PIPE_NO_RESOURCE)
597*5113495bSYour Name 		(scn->pkt_stats.hif_pipe_no_resrc_count)
598*5113495bSYour Name 		+= 1;
599*5113495bSYour Name }
600*5113495bSYour Name 
601*5113495bSYour Name bool ce_check_rx_pending(struct CE_state *CE_state);
602*5113495bSYour Name void *hif_ce_get_lro_ctx(struct hif_opaque_softc *hif_hdl, int ctx_id);
603*5113495bSYour Name struct ce_ops *ce_services_srng(void);
604*5113495bSYour Name struct ce_ops *ce_services_legacy(void);
605*5113495bSYour Name bool ce_srng_based(struct hif_softc *scn);
606*5113495bSYour Name /* Forward declaration */
607*5113495bSYour Name struct CE_ring_state;
608*5113495bSYour Name 
609*5113495bSYour Name struct ce_ops {
610*5113495bSYour Name 	uint32_t (*ce_get_desc_size)(uint8_t ring_type);
611*5113495bSYour Name 	int (*ce_ring_setup)(struct hif_softc *scn, uint8_t ring_type,
612*5113495bSYour Name 		uint32_t ce_id, struct CE_ring_state *ring,
613*5113495bSYour Name 		struct CE_attr *attr);
614*5113495bSYour Name 	void (*ce_srng_cleanup)(struct hif_softc *scn,
615*5113495bSYour Name 				struct CE_state *CE_state, uint8_t ring_type);
616*5113495bSYour Name 	QDF_STATUS (*ce_send_nolock)(struct CE_handle *copyeng,
617*5113495bSYour Name 				     void *per_transfer_context,
618*5113495bSYour Name 				     qdf_dma_addr_t buffer,
619*5113495bSYour Name 				     uint32_t nbytes,
620*5113495bSYour Name 				     uint32_t transfer_id,
621*5113495bSYour Name 				     uint32_t flags,
622*5113495bSYour Name 				     uint32_t user_flags);
623*5113495bSYour Name 	QDF_STATUS (*ce_sendlist_send)(struct CE_handle *copyeng,
624*5113495bSYour Name 				       void *per_transfer_context,
625*5113495bSYour Name 				       struct ce_sendlist *sendlist,
626*5113495bSYour Name 				       unsigned int transfer_id);
627*5113495bSYour Name 	QDF_STATUS (*ce_revoke_recv_next)(struct CE_handle *copyeng,
628*5113495bSYour Name 			void **per_CE_contextp,
629*5113495bSYour Name 			void **per_transfer_contextp,
630*5113495bSYour Name 			qdf_dma_addr_t *bufferp);
631*5113495bSYour Name 	QDF_STATUS (*ce_cancel_send_next)(struct CE_handle *copyeng,
632*5113495bSYour Name 			void **per_CE_contextp, void **per_transfer_contextp,
633*5113495bSYour Name 			qdf_dma_addr_t *bufferp, unsigned int *nbytesp,
634*5113495bSYour Name 			unsigned int *transfer_idp,
635*5113495bSYour Name 			uint32_t *toeplitz_hash_result);
636*5113495bSYour Name 	QDF_STATUS (*ce_recv_buf_enqueue)(struct CE_handle *copyeng,
637*5113495bSYour Name 					  void *per_recv_context,
638*5113495bSYour Name 					  qdf_dma_addr_t buffer);
639*5113495bSYour Name 	bool (*watermark_int)(struct CE_state *CE_state, unsigned int *flags);
640*5113495bSYour Name 	QDF_STATUS (*ce_completed_recv_next_nolock)(
641*5113495bSYour Name 			struct CE_state *CE_state,
642*5113495bSYour Name 			void **per_CE_contextp,
643*5113495bSYour Name 			void **per_transfer_contextp,
644*5113495bSYour Name 			qdf_dma_addr_t *bufferp,
645*5113495bSYour Name 			unsigned int *nbytesp,
646*5113495bSYour Name 			unsigned int *transfer_idp,
647*5113495bSYour Name 			unsigned int *flagsp);
648*5113495bSYour Name 	QDF_STATUS (*ce_completed_send_next_nolock)(
649*5113495bSYour Name 			struct CE_state *CE_state,
650*5113495bSYour Name 			void **per_CE_contextp,
651*5113495bSYour Name 			void **per_transfer_contextp,
652*5113495bSYour Name 			qdf_dma_addr_t *bufferp,
653*5113495bSYour Name 			unsigned int *nbytesp,
654*5113495bSYour Name 			unsigned int *transfer_idp,
655*5113495bSYour Name 			unsigned int *sw_idx,
656*5113495bSYour Name 			unsigned int *hw_idx,
657*5113495bSYour Name 			uint32_t *toeplitz_hash_result);
658*5113495bSYour Name 	unsigned int (*ce_recv_entries_done_nolock)(struct hif_softc *scn,
659*5113495bSYour Name 			struct CE_state *CE_state);
660*5113495bSYour Name 	unsigned int (*ce_send_entries_done_nolock)(struct hif_softc *scn,
661*5113495bSYour Name 			    struct CE_state *CE_state);
662*5113495bSYour Name 	void (*ce_per_engine_handler_adjust)(struct CE_state *CE_state,
663*5113495bSYour Name 			     int disable_copy_compl_intr);
664*5113495bSYour Name 	void (*ce_prepare_shadow_register_v2_cfg)(struct hif_softc *scn,
665*5113495bSYour Name 			    struct pld_shadow_reg_v2_cfg **shadow_config,
666*5113495bSYour Name 			    int *num_shadow_registers_configured);
667*5113495bSYour Name 	int (*ce_get_index_info)(struct hif_softc *scn, void *ce_state,
668*5113495bSYour Name 				 struct ce_index *info);
669*5113495bSYour Name #ifdef CONFIG_SHADOW_V3
670*5113495bSYour Name 	void (*ce_prepare_shadow_register_v3_cfg)(struct hif_softc *scn,
671*5113495bSYour Name 			    struct pld_shadow_reg_v3_cfg **shadow_config,
672*5113495bSYour Name 			    int *num_shadow_registers_configured);
673*5113495bSYour Name #endif
674*5113495bSYour Name #ifdef FEATURE_DIRECT_LINK
675*5113495bSYour Name 	QDF_STATUS (*ce_set_irq_config_by_ceid)(struct hif_softc *scn,
676*5113495bSYour Name 						uint8_t ce_id, uint64_t addr,
677*5113495bSYour Name 						uint32_t data);
678*5113495bSYour Name 	uint16_t (*ce_get_direct_link_dest_buffers)(struct hif_softc *scn,
679*5113495bSYour Name 						    uint64_t **dma_addr,
680*5113495bSYour Name 						    uint32_t *buf_size);
681*5113495bSYour Name 	QDF_STATUS (*ce_get_direct_link_ring_info)(struct hif_softc *scn,
682*5113495bSYour Name 					   struct hif_direct_link_ce_info *info,
683*5113495bSYour Name 					   uint8_t max_ce_info_len);
684*5113495bSYour Name #endif
685*5113495bSYour Name };
686*5113495bSYour Name 
687*5113495bSYour Name int hif_ce_bus_early_suspend(struct hif_softc *scn);
688*5113495bSYour Name int hif_ce_bus_late_resume(struct hif_softc *scn);
689*5113495bSYour Name 
690*5113495bSYour Name /*
691*5113495bSYour Name  * ce_engine_service_reg:
692*5113495bSYour Name  * @scn: hif_context
693*5113495bSYour Name  * @CE_id: Copy engine ID
694*5113495bSYour Name  *
695*5113495bSYour Name  * Called from ce_per_engine_service and goes through the regular interrupt
696*5113495bSYour Name  * handling that does not involve the WLAN fast path feature.
697*5113495bSYour Name  *
698*5113495bSYour Name  * Returns void
699*5113495bSYour Name  */
700*5113495bSYour Name void ce_engine_service_reg(struct hif_softc *scn, int CE_id);
701*5113495bSYour Name 
702*5113495bSYour Name /**
703*5113495bSYour Name  * ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
704*5113495bSYour Name  * @scn: hif_context
705*5113495bSYour Name  * @ce_id: Copy engine ID
706*5113495bSYour Name  *
707*5113495bSYour Name  * Return: void
708*5113495bSYour Name  */
709*5113495bSYour Name void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);
710*5113495bSYour Name 
711*5113495bSYour Name void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
712*5113495bSYour Name 					int coalesce);
713*5113495bSYour Name 
714*5113495bSYour Name /*
715*5113495bSYour Name  * ce_ring_flush_write_idx() - CE handler to flush write index
716*5113495bSYour Name  * @ce_tx_hdl: ce handle
717*5113495bSYour Name  * @force_flush: force flush the write idx if it set to true.
718*5113495bSYour Name  *
719*5113495bSYour Name  * Returns void
720*5113495bSYour Name  */
721*5113495bSYour Name void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush);
722*5113495bSYour Name #endif /* __COPY_ENGINE_API_H__ */
723