/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "ce_api.h"
#include "ce_internal.h"
#include "ce_main.h"
#include "ce_reg.h"
#include "hif.h"
#include "hif_debug.h"
#include "hif_io32.h"
#include "qdf_lock.h"
#include "hif_main.h"
#include "hif_napi.h"
#include "qdf_module.h"
#include "regtable.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * The Target TX hash result is reported via toeplitz_hash_result.
 */

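/*
 * Illustrative only (a sketch, not code used by this file): the gather
 * behavior described above is what the sendlist path below relies on.
 * A caller could describe an HTC/HTT header plus payload as a two-item
 * sendlist, which the hardware delivers into one destination buffer
 * with a single interrupt:
 *
 *	struct ce_sendlist sl;
 *
 *	ce_sendlist_init(&sl);
 *	ce_sendlist_buf_add(&sl, hdr_paddr, hdr_len, 0, 0);
 *	ce_sendlist_buf_add(&sl, payload_paddr, payload_len, 0, 0);
 *	ce_sendlist_send(copyeng, msdu, &sl, transfer_id);
 *
 * See ce_api.h for the authoritative ce_sendlist_* prototypes.
 */
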
/* NB: Modeled after ce_completed_send_next */
/* Shift bits to convert IS_*_RING_*_WATERMARK_MASK to CE_WM_FLAG_*_* */
#define CE_WM_SHFT 1

#ifdef WLAN_FEATURE_FASTPATH
#ifdef QCA_WIFI_3_0
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
	shadow_src_desc->buffer_addr_hi =
		(uint32_t)((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK);
	user_flags |= shadow_src_desc->buffer_addr_hi;
	memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
	       sizeof(uint32_t));
}
#else
static inline void
ce_buffer_addr_hi_set(struct CE_src_desc *shadow_src_desc,
		      uint64_t dma_addr,
		      uint32_t user_flags)
{
}
#endif

#define SLOTS_PER_DATAPATH_TX 2
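
/*
 * Each fastpath Tx consumes SLOTS_PER_DATAPATH_TX (two) source-ring
 * slots: one descriptor for the HTC/HTT header fragment (gather = 1)
 * and one for the payload fragment (gather = 0). ce_send_fast() below
 * therefore refuses to post unless CE_RING_DELTA() reports at least
 * this many free slots.
 */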

/**
 * ce_send_fast() - CE layer Tx buffer posting function
 * @copyeng: copy engine handle
 * @msdu: msdu to be sent
 * @transfer_id: transfer_id
 * @download_len: packet download length
 *
 * Assumption: called with an MSDU whose fragment 0 holds the HTC/HTT
 * header and whose fragment 1 holds the data payload.
 * Function:
 * 1. Check the number of available source-ring entries
 * 2. Fill the source-ring descriptors (allocated in consistent memory)
 * 3. Write the updated write index to hardware
 *
 * Return: number of packets that could be sent (0 or 1)
 */
int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		 unsigned int transfer_id, uint32_t download_len)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	uint64_t dma_addr;
	uint32_t user_flags;
	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
	bool ok_to_send = true;

	/*
	 * Log assuming the call will go through; if it does not, an
	 * error trace is added below as well.
	 * Please add the same failure log for any additional error paths.
	 */
	DPTRACE(qdf_dp_trace(msdu,
			     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(msdu),
			     sizeof(qdf_nbuf_data(msdu)), QDF_TX));

	qdf_spin_lock_bh(&ce_state->ce_index_lock);

	/*
	 * Request runtime PM resume if the bus has already suspended and
	 * make sure there is no PCIe link access while it is suspended.
	 */
	if (hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_CE) != 0)
		ok_to_send = false;

	if (ok_to_send) {
		Q_TARGET_ACCESS_BEGIN(scn);
		DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	}

	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;
	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_TX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
			 < SLOTS_PER_DATAPATH_TX)) {
		hif_err_rl("Source ring full, required %d, available %d",
			   SLOTS_PER_DATAPATH_TX,
			   CE_RING_DELTA(nentries_mask, write_index,
					 sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		if (ok_to_send)
			Q_TARGET_ACCESS_END(scn);
		qdf_spin_unlock_bh(&ce_state->ce_index_lock);

		DPTRACE(qdf_dp_trace(NULL,
				     QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     NULL, 0, QDF_TX));

		return 0;
	}

	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/*
		 * First fill out the ring descriptor for the HTC/HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* The HTT/HTC header could also be passed in as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		download_len -= shadow_src_desc->nbytes;
		/*
		 * The HTC/HTT header is a word stream, so byte swap it if CE
		 * byte swap is enabled.
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					CE_ATTR_BYTE_SWAP_DATA) != 0);
		/* No HW write index update is needed yet for this first slot */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;
		/*
		 * By default we could initialize the transfer context to this
		 * value.
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet.
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear the packet offset for all but the first CE desc.
		 */
		user_flags &= ~CE_DESC_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* download remaining bytes of payload */
		shadow_src_desc->nbytes = download_len;
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last fragment, gather is not set */
		shadow_src_desc->gather    = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;

		hif_record_ce_desc_event(scn, ce_state->id, type,
					 (union ce_desc *)src_desc,
				src_ring->per_transfer_context[write_index],
				write_index, shadow_src_desc->nbytes);

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		DPTRACE(qdf_dp_trace(msdu,
				     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(msdu),
				     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
	}

	src_ring->write_index = write_index;

	if (ok_to_send) {
		if (qdf_likely(ce_state->state == CE_RUNNING)) {
			type = FAST_TX_WRITE_INDEX_UPDATE;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
			Q_TARGET_ACCESS_END(scn);
		} else {
			ce_state->state = CE_PENDING;
		}
		hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_CE);
	}

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	/* sent 1 packet */
	return 1;
}
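
/*
 * Illustrative caller sketch (hypothetical helper names, not code from
 * this file): the Tx datapath maps the frame so that fragment 0 holds
 * the HTC/HTT header and fragment 1 the payload, then posts it:
 *
 *	if (ce_send_fast(ce_tx_hdl, msdu, transfer_id, download_len) == 0)
 *		requeue_for_later(msdu);	// ring was full
 *
 * ce_tx_hdl is assumed to be the CE_handle of the HTT data CE.
 */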

/**
 * ce_fastpath_rx_handle() - calls the fastpath message handler and
 *	updates write_index
 * @ce_state: handle to copy engine state
 * @cmpl_msdus: Rx msdus
 * @num_cmpls: number of Rx msdus
 * @ctrl_addr: CE control address
 *
 * Return: None
 */
static void ce_fastpath_rx_handle(struct CE_state *ce_state,
				  qdf_nbuf_t *cmpl_msdus, uint32_t num_cmpls,
				  uint32_t ctrl_addr)
{
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t write_index;

	/* Release ce_index_lock while calling into the upper-layer handler */
	qdf_spin_unlock(&ce_state->ce_index_lock);
	ce_state->fastpath_handler(ce_state->context, cmpl_msdus, num_cmpls);
	qdf_spin_lock(&ce_state->ce_index_lock);

	/* Update Destination Ring Write Index */
	write_index = dest_ring->write_index;
	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, num_cmpls);

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_WRITE_INDEX_UPDATE,
				 NULL, NULL, write_index, 0);

	CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}

/**
 * ce_per_engine_service_fast() - CE handler routine to service fastpath
 *	messages
 * @scn: hif_context
 * @ce_id: copy engine ID
 *
 * 1) Walk the CE ring and find valid completions
 * 2) For each valid completion, retrieve the nbuf from
 *    per_transfer_context[]
 * 3) Sync the buffer for the CPU and accumulate it in an array
 * 4) Call the message handler when the array is full or when exiting
 *    the handler
 *
 * Return: void
 */
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_ring_state *dest_ring = ce_state->dest_ring;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;

	uint32_t nentries_mask = dest_ring->nentries_mask;
	uint32_t sw_index = dest_ring->sw_index;
	uint32_t nbytes;
	qdf_nbuf_t nbuf;
	dma_addr_t paddr;
	struct CE_dest_desc *dest_desc;
	qdf_nbuf_t cmpl_msdus[MSG_FLUSH_NUM];
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	uint32_t nbuf_cmpl_idx = 0;
	unsigned int more_comp_cnt = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct ce_ops *ce_services = hif_state->ce_services;

more_data:
	for (;;) {
		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base,
						 sw_index);

		/*
		 * The following 2 reads are from non-cached memory
		 */
		nbytes = dest_desc->nbytes;

		/* If the completion is invalid, break */
		if (qdf_unlikely(nbytes == 0))
			break;

		/*
		 * Build the nbuf list from valid completions
		 */
		nbuf = dest_ring->per_transfer_context[sw_index];

		/*
		 * No lock is needed here, since this is the only thread
		 * that accesses the sw_index
		 */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);

		/*
		 * CAREFUL: uncached write, but still less expensive,
		 * since most modern caches use "write-combining" to
		 * flush multiple cache-writes all at once.
		 */
		dest_desc->nbytes = 0;

		/*
		 * Per our understanding this is not required on our
		 * platform, since we are doing the same cache invalidation
		 * operation on the same buffer twice in succession,
		 * without any modification to this buffer by the CPU in
		 * between.
		 * However, this code with 2 syncs in succession has
		 * been undergoing some testing at a customer site,
		 * and seemed to be showing no problems so far. We would
		 * like to validate with the customer that this line
		 * really is not required, before we remove it
		 * completely.
		 */
		paddr = QDF_NBUF_CB_PADDR(nbuf);

		qdf_mem_dma_sync_single_for_cpu(scn->qdf_dev, paddr,
						(skb_end_pointer(nbuf) -
						(nbuf)->data),
						DMA_FROM_DEVICE);

		qdf_nbuf_put_tail(nbuf, nbytes);

		qdf_assert_always(nbuf->data);

		QDF_NBUF_CB_RX_CTX_ID(nbuf) =
				hif_get_rx_ctx_id(ce_state->id, hif_hdl);
		cmpl_msdus[nbuf_cmpl_idx++] = nbuf;

		/*
		 * We are not posting the buffers back; instead we are
		 * reusing the buffers.
		 */
		if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
			hif_record_ce_desc_event(scn, ce_state->id,
						 FAST_RX_SOFTWARE_INDEX_UPDATE,
						 NULL, NULL, sw_index, 0);
			dest_ring->sw_index = sw_index;
			ce_fastpath_rx_handle(ce_state, cmpl_msdus,
					      nbuf_cmpl_idx, ctrl_addr);

			ce_state->receive_count += nbuf_cmpl_idx;
			if (qdf_unlikely(hif_ce_service_should_yield(
						scn, ce_state))) {
				ce_state->force_break = 1;
				qdf_atomic_set(&ce_state->rx_pending, 1);
				return;
			}

			nbuf_cmpl_idx = 0;
			more_comp_cnt = 0;
		}
	}

	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_RX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	dest_ring->sw_index = sw_index;

	/*
	 * If there are not enough completions to fill the array,
	 * just call the message handler here.
	 */
	if (nbuf_cmpl_idx) {
		ce_fastpath_rx_handle(ce_state, cmpl_msdus,
				      nbuf_cmpl_idx, ctrl_addr);

		ce_state->receive_count += nbuf_cmpl_idx;
		if (qdf_unlikely(hif_ce_service_should_yield(scn, ce_state))) {
			ce_state->force_break = 1;
			qdf_atomic_set(&ce_state->rx_pending, 1);
			return;
		}

		/* check for more packets after upper layer processing */
		nbuf_cmpl_idx = 0;
		more_comp_cnt = 0;
		goto more_data;
	}

	hif_update_napi_max_poll_time(ce_state, ce_id, qdf_get_cpu());

	qdf_atomic_set(&ce_state->rx_pending, 0);
	if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		if (!ce_state->msi_supported)
			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
						   HOST_IS_COPY_COMPLETE_MASK);
	} else {
		hif_err_rl("Target access is not allowed");
		return;
	}

	if (ce_services->ce_recv_entries_done_nolock(scn, ce_state)) {
		if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
			goto more_data;
		} else {
			hif_err("Potential infinite loop detected during Rx processing nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
				nentries_mask,
				ce_state->dest_ring->sw_index,
				CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr));
		}
	}
#ifdef NAPI_YIELD_BUDGET_BASED
	/*
	 * Caution: before you modify this code, please refer to the
	 * hif_napi_poll function to understand how napi_complete gets
	 * called, and make the necessary changes. Force break has to be
	 * done until WIN disables the interrupt at source.
	 */
	ce_state->force_break = 1;
#endif
}

/**
 * ce_is_fastpath_enabled() - returns true if fastpath mode is enabled
 * @scn: Handle to HIF context
 *
 * Return: true if fastpath is enabled else false.
 */
static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return scn->fastpath_mode_on;
}
#else
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id)
{
}

static inline bool ce_is_fastpath_enabled(struct hif_softc *scn)
{
	return false;
}
#endif /* WLAN_FEATURE_FASTPATH */

static QDF_STATUS
ce_send_nolock_legacy(struct CE_handle *copyeng,
		      void *per_transfer_context,
		      qdf_dma_addr_t buffer,
		      uint32_t nbytes,
		      uint32_t transfer_id,
		      uint32_t flags,
		      uint32_t user_flags)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type;
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/* Update the low 32 bits of the source descriptor address */
		shadow_src_desc->buffer_addr =
			(uint32_t)(dma_addr & 0xFFFFFFFF);

#ifdef QCA_WIFI_3_0
		shadow_src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) &
				   CE_RING_BASE_ADDR_HIGH_MASK);
		user_flags |= shadow_src_desc->buffer_addr_hi;
		memcpy(&(((uint32_t *)shadow_src_desc)[1]), &user_flags,
		       sizeof(uint32_t));
#endif
		shadow_src_desc->target_int_disable = 0;
		shadow_src_desc->host_int_disable = 0;

		shadow_src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		shadow_src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			 != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		shadow_src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		shadow_src_desc->nbytes = nbytes;
		ce_validate_nbytes(nbytes, CE_state);

		/* Flush the shadow copy to the actual uncached descriptor */
		*src_desc = *shadow_src_desc;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;

		/* Update Source Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* WORKAROUND */
		if (shadow_src_desc->gather) {
			event_type = HIF_TX_GATHER_DESC_POST;
		} else if (qdf_unlikely(CE_state->state != CE_RUNNING)) {
			event_type = HIF_TX_DESC_SOFTWARE_POST;
			CE_state->state = CE_PENDING;
		} else {
			event_type = HIF_TX_DESC_POST;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
						      write_index);
		}

		/*
		 * src_ring->write_index hasn't been updated yet, even though
		 * the register has already been written to.
		 */
		hif_record_ce_desc_event(scn, CE_state->id, event_type,
			(union ce_desc *)shadow_src_desc, per_transfer_context,
			src_ring->write_index, nbytes);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

static QDF_STATUS
ce_sendlist_send_legacy(struct CE_handle *copyeng,
			void *per_transfer_context,
			struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	QDF_STATUS status = QDF_STATUS_E_NOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->scn->fastpath_mode_on && CE_state->htt_tx_data &&
	    Q_TARGET_ACCESS_BEGIN(scn) == 0) {
		src_ring->sw_index = CE_SRC_RING_READ_IDX_GET_FROM_DDR(
					       scn, CE_state->ctrl_addr);
		Q_TARGET_ACCESS_END(scn);
	}

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_legacy(copyeng,
				CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t)item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_legacy(copyeng, per_transfer_context,
					       (qdf_dma_addr_t)item->data,
					       item->u.nbytes,
					       transfer_id, item->flags,
					       item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					     QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)&(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data),
			QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification.  We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

/**
 * ce_recv_buf_enqueue_legacy() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
static QDF_STATUS
ce_recv_buf_enqueue_legacy(struct CE_handle *copyeng,
			   void *per_recv_context, qdf_dma_addr_t buffer)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_IO;
	}

	if ((CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) ||
	    (ce_is_fastpath_enabled(scn) && CE_state->htt_rx_data)) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, write_index);

		/* Update the low 32 bits of the destination descriptor */
		dest_desc->buffer_addr = (uint32_t)(dma_addr & 0xFFFFFFFF);
#ifdef QCA_WIFI_3_0
		dest_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) &
				   CE_RING_BASE_ADDR_HIGH_MASK);
#endif
		dest_desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
			per_recv_context;

		hif_record_ce_desc_event(scn, CE_state->id,
					 HIF_RX_DESC_POST,
					 (union ce_desc *)dest_desc,
					 per_recv_context,
					 write_index, 0);

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		if (write_index != sw_index) {
			CE_DEST_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
			dest_ring->write_index = write_index;
		}
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}
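
/*
 * Illustrative posting pattern (a sketch under assumed usage, not code
 * from this file): the HIF layer typically allocates and DMA-maps an
 * nbuf, then posts its physical address with the nbuf itself as the
 * per-transfer context:
 *
 *	nbuf = qdf_nbuf_alloc(scn->qdf_dev, buf_sz, 0, 4, false);
 *	qdf_nbuf_map_single(scn->qdf_dev, nbuf, QDF_DMA_FROM_DEVICE);
 *	ce_recv_buf_enqueue(copyeng, (void *)nbuf,
 *			    qdf_nbuf_get_frag_paddr(nbuf, 0));
 *
 * buf_sz is a placeholder for the ring's configured buffer size.
 */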

static unsigned int
ce_send_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *src_ring = CE_state->src_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = src_ring->sw_index;
	read_index = CE_SRC_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}

static unsigned int
ce_recv_entries_done_nolock_legacy(struct hif_softc *scn,
				   struct CE_state *CE_state)
{
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index;
	unsigned int read_index;

	sw_index = dest_ring->sw_index;
	read_index = CE_DEST_RING_READ_IDX_GET(scn, ctrl_addr);

	return CE_RING_DELTA(nentries_mask, sw_index, read_index);
}
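
/*
 * Worked example of the delta computation used above: CE_RING_DELTA()
 * evaluates (toidx - fromidx) & nentries_mask. With a 512-entry ring
 * (nentries_mask = 511), sw_index = 500 and a hardware read_index of 4,
 * the delta is (4 - 500) & 511 = 16, i.e. sixteen completed entries are
 * pending, wrap-around included.
 */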

static QDF_STATUS
ce_completed_recv_next_nolock_legacy(struct CE_state *CE_state,
				     void **per_CE_contextp,
				     void **per_transfer_contextp,
				     qdf_dma_addr_t *bufferp,
				     unsigned int *nbytesp,
				     unsigned int *transfer_idp,
				     unsigned int *flagsp)
{
	QDF_STATUS status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct CE_dest_desc *dest_ring_base =
		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
	struct CE_dest_desc *dest_desc =
		CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);
	int nbytes;
	struct CE_dest_desc dest_desc_info;
	/*
	 * By copying the dest_desc_info element to local memory, we
	 * avoid extra memory reads from non-cacheable memory.
	 */
	dest_desc_info = *dest_desc;
	nbytes = dest_desc_info.nbytes;
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		status = QDF_STATUS_E_FAILURE;
		goto done;
	}

	hif_record_ce_desc_event(scn, CE_state->id, HIF_RX_DESC_COMPLETION,
				 (union ce_desc *)dest_desc,
				 dest_ring->per_transfer_context[sw_index],
				 sw_index, 0);

	dest_desc->nbytes = 0;

	/* Return data from the completed destination descriptor */
	*bufferp = HIF_CE_DESC_ADDR_TO_DMA(&dest_desc_info);
	*nbytesp = nbytes;
	*transfer_idp = dest_desc_info.meta_data;
	*flagsp = (dest_desc_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	if (per_transfer_contextp) {
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];
	}
	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

done:
	return status;
}

/* NB: Modeled after ce_completed_recv_next_nolock */
static QDF_STATUS
ce_revoke_recv_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state;
	struct CE_ring_state *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	dest_ring = CE_state->dest_ring;
	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct CE_dest_desc *dest_ring_base =
			(struct CE_dest_desc *)dest_ring->
			    base_addr_owner_space;
		struct CE_dest_desc *dest_desc =
			CE_DEST_RING_TO_DESC(dest_ring_base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(dest_desc);

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->recv_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];
		}
		dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}
929*5113495bSYour Name 
930*5113495bSYour Name /*
931*5113495bSYour Name  * Guts of ce_completed_send_next.
932*5113495bSYour Name  * The caller takes responsibility for any necessary locking.
933*5113495bSYour Name  */
934*5113495bSYour Name static QDF_STATUS
ce_completed_send_next_nolock_legacy(struct CE_state * CE_state,void ** per_CE_contextp,void ** per_transfer_contextp,qdf_dma_addr_t * bufferp,unsigned int * nbytesp,unsigned int * transfer_idp,unsigned int * sw_idx,unsigned int * hw_idx,uint32_t * toeplitz_hash_result)935*5113495bSYour Name ce_completed_send_next_nolock_legacy(struct CE_state *CE_state,
936*5113495bSYour Name 				     void **per_CE_contextp,
937*5113495bSYour Name 				     void **per_transfer_contextp,
938*5113495bSYour Name 				     qdf_dma_addr_t *bufferp,
939*5113495bSYour Name 				     unsigned int *nbytesp,
940*5113495bSYour Name 				     unsigned int *transfer_idp,
941*5113495bSYour Name 				     unsigned int *sw_idx,
942*5113495bSYour Name 				     unsigned int *hw_idx,
943*5113495bSYour Name 				     uint32_t *toeplitz_hash_result)
944*5113495bSYour Name {
945*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
946*5113495bSYour Name 	struct CE_ring_state *src_ring = CE_state->src_ring;
947*5113495bSYour Name 	uint32_t ctrl_addr = CE_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct hif_softc *scn = CE_state->scn;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return QDF_STATUS_E_FAILURE;
		src_ring->hw_index =
			CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, ctrl_addr);
		if (Q_TARGET_ACCESS_END(scn) < 0)
			return QDF_STATUS_E_FAILURE;
	}
	read_index = src_ring->hw_index;

	if (sw_idx)
		*sw_idx = sw_index;

	if (hw_idx)
		*hw_idx = read_index;

	if ((read_index != sw_index) && (read_index != 0xffffffff)) {
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, sw_index);
#ifdef QCA_WIFI_3_0
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);
#endif
		hif_record_ce_desc_event(scn, CE_state->id,
				HIF_TX_DESC_COMPLETION,
				(union ce_desc *)shadow_src_desc,
				src_ring->per_transfer_context[sw_index],
				sw_index, shadow_src_desc->nbytes);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(shadow_src_desc);
		*nbytesp = shadow_src_desc->nbytes;
		*transfer_idp = shadow_src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif
		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}

	return status;
}
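
/*
 * Illustrative sketch (editorial example, not part of the driver): a
 * send-completion reaper typically calls the nolock variant above in a
 * loop, under the CE index lock, until no completed descriptor remains.
 * The "example_" name is hypothetical and the call assumes the
 * parameter order of the function defined above.
 */
static inline void example_reap_completed_sends(struct CE_state *CE_state)
{
	void *per_ce_ctx, *per_xfer_ctx;
	qdf_dma_addr_t buf;
	unsigned int nbytes, xfer_id, sw_idx, hw_idx;
	uint32_t toeplitz;

	qdf_spin_lock(&CE_state->ce_index_lock);
	while (ce_completed_send_next_nolock_legacy(CE_state, &per_ce_ctx,
						    &per_xfer_ctx, &buf,
						    &nbytes, &xfer_id,
						    &sw_idx, &hw_idx,
						    &toeplitz) ==
	       QDF_STATUS_SUCCESS) {
		/* hand per_xfer_ctx back to the sender's completion path */
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);
}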

static QDF_STATUS
ce_cancel_send_next_legacy(struct CE_handle *copyeng,
			   void **per_CE_contextp,
			   void **per_transfer_contextp,
			   qdf_dma_addr_t *bufferp,
			   unsigned int *nbytesp,
			   unsigned int *transfer_idp,
			   uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	QDF_STATUS status;
	struct hif_softc *scn;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	scn = CE_state->scn;
	qdf_spin_lock(&CE_state->ce_index_lock);
	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = HIF_CE_DESC_ADDR_TO_DMA(src_desc);
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
#ifdef QCA_WIFI_3_0
		*toeplitz_hash_result = src_desc->toeplitz_hash_result;
#else
		*toeplitz_hash_result = 0;
#endif

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		if (per_transfer_contextp) {
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];
		}
		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	} else {
		status = QDF_STATUS_E_FAILURE;
	}
	qdf_spin_unlock(&CE_state->ce_index_lock);

	return status;
}
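
/*
 * Illustrative sketch (editorial example, not part of the driver):
 * during pipe teardown, a caller can repeatedly cancel the oldest
 * outstanding send until the ring drains, releasing each returned
 * transfer context. The "example_" name is hypothetical.
 */
static inline void example_drain_pending_sends(struct CE_handle *copyeng)
{
	void *per_ce_ctx, *per_xfer_ctx;
	qdf_dma_addr_t buf;
	unsigned int nbytes, xfer_id;
	uint32_t toeplitz;

	while (ce_cancel_send_next_legacy(copyeng, &per_ce_ctx,
					  &per_xfer_ctx, &buf, &nbytes,
					  &xfer_id, &toeplitz) ==
	       QDF_STATUS_SUCCESS) {
		/* e.g. free the nbuf stashed in per_xfer_ctx */
	}
}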

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
				    int disable_copy_compl_intr)
{
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	CE_state->disable_copy_compl_intr = disable_copy_compl_intr;

	if (CE_state->msi_supported)
		return;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		hif_err_rl("Target access is not allowed");
		/* Balance Q_TARGET_ACCESS_BEGIN before bailing out */
		Q_TARGET_ACCESS_END(scn);
		return;
	}

	if ((!disable_copy_compl_intr) &&
	    (CE_state->send_cb || CE_state->recv_cb))
		CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
	else
		CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);

	if (CE_state->watermark_cb)
		CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
	else
		CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
	Q_TARGET_ACCESS_END(scn);
}
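
/*
 * Illustrative sketch (editorial example, not part of the driver): a
 * poll-mode user masks the copy complete interrupt while it reaps
 * descriptors, then unmasks it when polling ends.
 */
static inline void example_poll_window(struct CE_state *CE_state)
{
	ce_per_engine_handler_adjust_legacy(CE_state, 1);	/* mask */
	/* ... reap send/recv completions by polling ... */
	ce_per_engine_handler_adjust_legacy(CE_state, 0);	/* unmask */
}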

#ifdef QCA_WIFI_WCN6450
int ce_enqueue_desc(struct CE_handle *copyeng, qdf_nbuf_t msdu,
		    unsigned int transfer_id, uint32_t download_len)
{
	struct CE_state *ce_state = (struct CE_state *)copyeng;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	unsigned int frag_len;
	uint64_t dma_addr;
	uint32_t user_flags;
	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;

	/*
	 * Log assuming the call will succeed; every error path below must
	 * record a matching failure trace.
	 */
	DPTRACE(qdf_dp_trace(msdu,
			     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
			     QDF_TRACE_DEFAULT_PDEV_ID,
			     qdf_nbuf_data_addr(msdu),
			     sizeof(qdf_nbuf_data(msdu)), QDF_TX));

	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);

	write_index = src_ring->write_index;
	sw_index = src_ring->sw_index;
	hif_record_ce_desc_event(scn, ce_state->id,
				 FAST_TX_SOFTWARE_INDEX_UPDATE,
				 NULL, NULL, sw_index, 0);

	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
			 < SLOTS_PER_DATAPATH_TX)) {
		hif_err_rl("Source ring full, required %d, available %d",
			   SLOTS_PER_DATAPATH_TX,
			   CE_RING_DELTA(nentries_mask, write_index,
					 sw_index - 1));
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);

		DPTRACE(qdf_dp_trace(NULL,
				     QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     NULL, 0, QDF_TX));

		return -ENOSPC;
	}

	{
		struct CE_src_desc *src_ring_base =
			(struct CE_src_desc *)src_ring->base_addr_owner_space;
		struct CE_src_desc *shadow_base =
			(struct CE_src_desc *)src_ring->shadow_base;
		struct CE_src_desc *src_desc =
			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		struct CE_src_desc *shadow_src_desc =
			CE_SRC_RING_TO_DESC(shadow_base, write_index);

		/*
		 * First fill out the ring descriptor for the HTC HTT frame
		 * header. These are uncached writes. Should we use a local
		 * structure instead?
		 */
		/* HTT/HTC header can be passed as an argument */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;
		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		download_len -= shadow_src_desc->nbytes;
		/*
		 * HTC HTT header is a word stream, so byte swap if CE byte
		 * swap enabled
		 */
		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
					CE_ATTR_BYTE_SWAP_DATA) != 0);
		/*
		 * First fragment of a gather pair: the HW write index is
		 * not published until the payload descriptor is filled.
		 */
		shadow_src_desc->gather = 1;
		*src_desc = *shadow_src_desc;
		/* By default we could initialize the transfer context to this
		 * value
		 */
		src_ring->per_transfer_context[write_index] =
			CE_SENDLIST_ITEM_CTXT;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
		/*
		 * Now fill out the ring descriptor for the actual data
		 * packet
		 */
		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
							  0xFFFFFFFF);
		/*
		 * Clear packet offset for all but the first CE desc.
		 */
		user_flags &= ~CE_DESC_PKT_OFFSET_BIT_M;
		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
		shadow_src_desc->meta_data = transfer_id;

		/* get actual packet length */
		frag_len = qdf_nbuf_get_frag_len(msdu, 1);

		/* download remaining bytes of payload */
		shadow_src_desc->nbytes = download_len;
		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
		if (shadow_src_desc->nbytes > frag_len)
			shadow_src_desc->nbytes = frag_len;

		/* Data packet is a byte stream, so disable byte swap */
		shadow_src_desc->byte_swap = 0;
		/* For the last one, gather is not set */
		shadow_src_desc->gather    = 0;
		*src_desc = *shadow_src_desc;
		src_ring->per_transfer_context[write_index] = msdu;

		hif_record_ce_desc_event(scn, ce_state->id, type,
					 (union ce_desc *)src_desc,
				src_ring->per_transfer_context[write_index],
				write_index, shadow_src_desc->nbytes);

		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		DPTRACE(qdf_dp_trace(msdu,
				     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
				     QDF_TRACE_DEFAULT_PDEV_ID,
				     qdf_nbuf_data_addr(msdu),
				     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
	}

	src_ring->write_index = write_index;

	return 0;
}
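
/*
 * Illustrative sketch (editorial example, not part of the driver): the
 * caller passes a two-fragment msdu (fragment 0 = HTC/HTT header,
 * fragment 1 = payload) and publishes the new write index to HW only
 * after both descriptors are enqueued. The doorbell step below is a
 * hypothetical stand-in for the platform-specific mechanism.
 */
static inline int example_send_two_frag_msdu(struct CE_handle *copyeng,
					     qdf_nbuf_t msdu,
					     unsigned int transfer_id,
					     uint32_t download_len)
{
	int ret = ce_enqueue_desc(copyeng, msdu, transfer_id, download_len);

	if (!ret) {
		/* ring the SRC ring doorbell here, e.g. via the write
		 * index register, so HW picks up both descriptors
		 */
	}
	return ret;
}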

static void ce_legacy_msi_param_setup(struct hif_softc *scn, uint32_t ctrl_addr,
				      uint32_t ce_id, struct CE_attr *attr)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	uint32_t tmp;
	int ret;
	int irq_id;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret) {
		hif_debug("Failed to get user msi assignment ret %d", ret);
		return;
	}

	irq_id = scn->int_assignment->msi_idx[ce_id];
	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	CE_MSI_ADDR_LOW_SET(scn, ctrl_addr, addr_low);
	tmp = CE_MSI_ADDR_HIGH_GET(scn, ctrl_addr);
	tmp &= ~CE_RING_BASE_ADDR_HIGH_MASK;
	tmp |= (addr_high & CE_RING_BASE_ADDR_HIGH_MASK);
	CE_MSI_ADDR_HIGH_SET(scn, ctrl_addr, tmp);
	CE_MSI_DATA_SET(scn, ctrl_addr, irq_id + msi_data_start);
	CE_MSI_EN_SET(scn, ctrl_addr);
}

static void ce_legacy_src_intr_thres_setup(struct hif_softc *scn,
					   uint32_t ctrl_addr,
					   struct CE_attr *attr,
					   uint32_t timer_thrs,
					   uint32_t count_thrs)
{
	uint32_t tmp;

	tmp = CE_CHANNEL_SRC_BATCH_TIMER_INT_SETUP_GET(scn, ctrl_addr);

	if (count_thrs) {
		tmp &= ~CE_SRC_BATCH_COUNTER_THRESH_MASK;
		tmp |= ((count_thrs << CE_SRC_BATCH_COUNTER_THRESH_LSB) &
			CE_SRC_BATCH_COUNTER_THRESH_MASK);
	}

	if (timer_thrs) {
		tmp &= ~CE_SRC_BATCH_TIMER_THRESH_MASK;
		tmp |= ((timer_thrs << CE_SRC_BATCH_TIMER_THRESH_LSB) &
			CE_SRC_BATCH_TIMER_THRESH_MASK);
	}

	CE_CHANNEL_SRC_BATCH_TIMER_INT_SETUP(scn, ctrl_addr, tmp);
	CE_CHANNEL_SRC_TIMER_BATCH_INT_EN(scn, ctrl_addr);
}
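
/*
 * Illustrative helper (editorial example, not part of the driver): the
 * threshold setup above follows the usual read-modify-write pattern; a
 * field is inserted by clearing its mask and OR-ing in the shifted,
 * masked value. A generic form of that step:
 */
static inline uint32_t example_set_reg_field(uint32_t reg, uint32_t mask,
					     uint32_t lsb, uint32_t val)
{
	return (reg & ~mask) | ((val << lsb) & mask);
}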

static void ce_legacy_dest_intr_thres_setup(struct hif_softc *scn,
					    uint32_t ctrl_addr,
					    struct CE_attr *attr,
					    uint32_t timer_thrs,
					    uint32_t count_thrs)
{
	uint32_t tmp;

	tmp = CE_CHANNEL_DST_BATCH_TIMER_INT_SETUP_GET(scn, ctrl_addr);

	if (count_thrs) {
		tmp &= ~CE_DST_BATCH_COUNTER_THRESH_MASK;
		tmp |= ((count_thrs << CE_DST_BATCH_COUNTER_THRESH_LSB) &
			CE_DST_BATCH_COUNTER_THRESH_MASK);
	}

	if (timer_thrs) {
		tmp &= ~CE_DST_BATCH_TIMER_THRESH_MASK;
		tmp |= ((timer_thrs << CE_DST_BATCH_TIMER_THRESH_LSB) &
			CE_DST_BATCH_TIMER_THRESH_MASK);
	}

	CE_CHANNEL_DST_BATCH_TIMER_INT_SETUP(scn, ctrl_addr, tmp);
	CE_CHANNEL_DST_TIMER_BATCH_INT_EN(scn, ctrl_addr);
}
#else
static void ce_legacy_msi_param_setup(struct hif_softc *scn, uint32_t ctrl_addr,
				      uint32_t ce_id, struct CE_attr *attr)
{
}

static void ce_legacy_src_intr_thres_setup(struct hif_softc *scn,
					   uint32_t ctrl_addr,
					   struct CE_attr *attr,
					   uint32_t timer_thrs,
					   uint32_t count_thrs)
{
}

static void ce_legacy_dest_intr_thres_setup(struct hif_softc *scn,
					    uint32_t ctrl_addr,
					    struct CE_attr *attr,
					    uint32_t timer_thrs,
					    uint32_t count_thrs)
{
}
#endif /* QCA_WIFI_WCN6450 */

static void ce_legacy_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				     struct CE_ring_state *src_ring,
				     struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;
	uint32_t timer_thrs;
	uint32_t count_thrs;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);

	src_ring->hw_index =
		CE_SRC_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	src_ring->sw_index = src_ring->hw_index;
	src_ring->write_index =
		CE_SRC_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = src_ring->base_addr_CE_space;
	CE_SRC_RING_BASE_ADDR_SET(scn, ctrl_addr,
				  (uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if SR_BA_ADDRESS_HIGH register exists */
	if (is_register_supported(SR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_SRC_RING_BASE_ADDR_HIGH_GET(scn, ctrl_addr);
		tmp &= ~CE_RING_BASE_ADDR_HIGH_MASK;
		dma_addr =
			((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK) | tmp;
		CE_SRC_RING_BASE_ADDR_HIGH_SET(scn, ctrl_addr,
					       (uint32_t)dma_addr);
	}
	CE_SRC_RING_SZ_SET(scn, ctrl_addr, src_ring->nentries);
	CE_SRC_RING_DMAX_SET(scn, ctrl_addr, attr->src_sz_max);
#ifdef BIG_ENDIAN_HOST
	/* Enable source ring byte swap for big endian host */
	CE_SRC_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, src_ring->nentries);

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		/* In 8us units */
		timer_thrs = CE_SRC_BATCH_TIMER_THRESHOLD >> 3;
		count_thrs = CE_SRC_BATCH_COUNTER_THRESHOLD;

		ce_legacy_msi_param_setup(scn, ctrl_addr, ce_id, attr);
		ce_legacy_src_intr_thres_setup(scn, ctrl_addr, attr,
					       timer_thrs, count_thrs);
	}
}
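
/*
 * Illustrative sketch (editorial example, not part of the driver): how
 * an up-to-40-bit ring base address splits across the LOW register and
 * the masked HIGH register, as programmed above for both rings.
 */
static inline void example_split_ring_base(uint64_t dma_addr,
					   uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(dma_addr & 0xFFFFFFFF);
	*hi = (uint32_t)((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK);
}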

static void ce_legacy_dest_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				      struct CE_ring_state *dest_ring,
				      struct CE_attr *attr)
{
	uint32_t ctrl_addr;
	uint64_t dma_addr;
	uint32_t timer_thrs;
	uint32_t count_thrs;

	QDF_ASSERT(ce_id < scn->ce_count);
	ctrl_addr = CE_BASE_ADDRESS(ce_id);
	dest_ring->sw_index =
		CE_DEST_RING_READ_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dest_ring->write_index =
		CE_DEST_RING_WRITE_IDX_GET_FROM_REGISTER(scn, ctrl_addr);
	dma_addr = dest_ring->base_addr_CE_space;
	CE_DEST_RING_BASE_ADDR_SET(scn, ctrl_addr,
				   (uint32_t)(dma_addr & 0xFFFFFFFF));

	/* if DR_BA_ADDRESS_HIGH register exists */
	if (is_register_supported(DR_BA_ADDRESS_HIGH)) {
		uint32_t tmp;

		tmp = CE_DEST_RING_BASE_ADDR_HIGH_GET(scn, ctrl_addr);
		tmp &= ~CE_RING_BASE_ADDR_HIGH_MASK;
		dma_addr =
			((dma_addr >> 32) & CE_RING_BASE_ADDR_HIGH_MASK) | tmp;
		CE_DEST_RING_BASE_ADDR_HIGH_SET(scn, ctrl_addr,
						(uint32_t)dma_addr);
	}

	CE_DEST_RING_SZ_SET(scn, ctrl_addr, dest_ring->nentries);
#ifdef BIG_ENDIAN_HOST
	/* Enable Dest ring byte swap for big endian host */
	CE_DEST_RING_BYTE_SWAP_SET(scn, ctrl_addr, 1);
#endif
	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr, 0);
	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr, dest_ring->nentries);

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		/* In 8us units */
		timer_thrs = CE_DST_BATCH_TIMER_THRESHOLD >> 3;
		count_thrs = CE_DST_BATCH_COUNTER_THRESHOLD;

		ce_legacy_msi_param_setup(scn, ctrl_addr, ce_id, attr);
		ce_legacy_dest_intr_thres_setup(scn, ctrl_addr, attr,
						timer_thrs, count_thrs);
	}
}

static uint32_t ce_get_desc_size_legacy(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct CE_src_desc);
	case CE_RING_DEST:
		return sizeof(struct CE_dest_desc);
	case CE_RING_STATUS:
		/* Legacy CE has no status ring */
		qdf_assert(0);
		return 0;
	default:
		return 0;
	}
}

static int ce_ring_setup_legacy(struct hif_softc *scn, uint8_t ring_type,
				uint32_t ce_id, struct CE_ring_state *ring,
				struct CE_attr *attr)
{
	int status = Q_TARGET_ACCESS_BEGIN(scn);

	if (status < 0)
		goto out;

	switch (ring_type) {
	case CE_RING_SRC:
		ce_legacy_src_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_DEST:
		ce_legacy_dest_ring_setup(scn, ce_id, ring, attr);
		break;
	case CE_RING_STATUS:
	default:
		qdf_assert(0);
		break;
	}

	Q_TARGET_ACCESS_END(scn);
out:
	return status;
}

static void ce_prepare_shadow_register_v2_cfg_legacy(struct hif_softc *scn,
			    struct pld_shadow_reg_v2_cfg **shadow_config,
			    int *num_shadow_registers_configured)
{
	*num_shadow_registers_configured = 0;
	*shadow_config = NULL;
}

static bool ce_check_int_watermark(struct CE_state *CE_state,
				   unsigned int *flags)
{
	uint32_t ce_int_status;
	uint32_t ctrl_addr = CE_state->ctrl_addr;
	struct hif_softc *scn = CE_state->scn;

	ce_int_status = CE_ENGINE_INT_STATUS_GET(scn, ctrl_addr);
	if (ce_int_status & CE_WATERMARK_MASK) {
		/* Convert HW IS bits to software flags */
		*flags = (ce_int_status & CE_WATERMARK_MASK) >> CE_WM_SHFT;
		return true;
	}

	return false;
}
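
/*
 * Illustrative sketch (editorial example, not part of the driver):
 * polling the watermark status and reporting the decoded software
 * flags. The "example_" name is hypothetical.
 */
static inline void example_log_watermark(struct CE_state *CE_state)
{
	unsigned int flags = 0;

	if (ce_check_int_watermark(CE_state, &flags))
		hif_debug("CE%d watermark flags 0x%x", CE_state->id, flags);
}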

void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx) { }

#ifdef HIF_CE_LOG_INFO
/**
 * ce_get_index_info_legacy(): Get CE index info
 * @scn: HIF Context
 * @ce_state: CE opaque handle
 * @info: CE info
 *
 * Return: 0 for success and non-zero for failure
 */
static
int ce_get_index_info_legacy(struct hif_softc *scn, void *ce_state,
			     struct ce_index *info)
{
	struct CE_state *state = (struct CE_state *)ce_state;

	info->id = state->id;
	if (state->src_ring) {
		info->u.legacy_info.sw_index = state->src_ring->sw_index;
		info->u.legacy_info.write_index = state->src_ring->write_index;
	} else if (state->dest_ring) {
		info->u.legacy_info.sw_index = state->dest_ring->sw_index;
		info->u.legacy_info.write_index = state->dest_ring->write_index;
	}

	return 0;
}
#endif
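
#ifdef HIF_CE_LOG_INFO
/*
 * Illustrative sketch (editorial example, not part of the driver):
 * dumping the snapshot captured by ce_get_index_info_legacy() from a
 * log-info/recovery path. The "example_" name is hypothetical.
 */
static inline void example_log_ce_index(struct hif_softc *scn,
					struct CE_state *ce_state)
{
	struct ce_index info = { 0 };

	if (!ce_get_index_info_legacy(scn, ce_state, &info))
		hif_debug("CE%d sw_index %u write_index %u", info.id,
			  info.u.legacy_info.sw_index,
			  info.u.legacy_info.write_index);
}
#endif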

#ifdef CONFIG_SHADOW_V3
static void ce_prepare_shadow_register_v3_cfg_legacy(struct hif_softc *scn,
				struct pld_shadow_reg_v3_cfg **shadow_config,
				int *num_shadow_registers_configured)
{
	hif_get_shadow_reg_config_v3(scn, shadow_config,
				     num_shadow_registers_configured);

	if (*num_shadow_registers_configured != 0) {
		hif_err("shadow register configuration already constructed");
		return;
	}

	hif_preare_shadow_register_cfg_v3(scn);
	hif_get_shadow_reg_config_v3(scn, shadow_config,
				     num_shadow_registers_configured);
}
#endif

struct ce_ops ce_service_legacy = {
	.ce_get_desc_size = ce_get_desc_size_legacy,
	.ce_ring_setup = ce_ring_setup_legacy,
	.ce_sendlist_send = ce_sendlist_send_legacy,
	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_legacy,
	.ce_revoke_recv_next = ce_revoke_recv_next_legacy,
	.ce_cancel_send_next = ce_cancel_send_next_legacy,
	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_legacy,
	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_legacy,
	.ce_send_nolock = ce_send_nolock_legacy,
	.watermark_int = ce_check_int_watermark,
	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_legacy,
	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_legacy,
	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_legacy,
	.ce_prepare_shadow_register_v2_cfg =
		ce_prepare_shadow_register_v2_cfg_legacy,
#ifdef HIF_CE_LOG_INFO
	.ce_get_index_info = ce_get_index_info_legacy,
#endif
#ifdef CONFIG_SHADOW_V3
	.ce_prepare_shadow_register_v3_cfg =
		ce_prepare_shadow_register_v3_cfg_legacy,
#endif
};

struct ce_ops *ce_services_legacy(void)
{
	return &ce_service_legacy;
}

qdf_export_symbol(ce_services_legacy);
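
/*
 * Illustrative sketch (editorial example, not part of the driver): a
 * HIF attach path would fetch this service table and dispatch through
 * its ops rather than calling the legacy functions directly.
 */
static inline uint32_t example_legacy_src_desc_size(void)
{
	struct ce_ops *ops = ce_services_legacy();

	return ops->ce_get_desc_size(CE_RING_SRC);
}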

void ce_service_legacy_init(void)
{
	ce_service_register_module(CE_SVC_LEGACY, &ce_services_legacy);
}