/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "hif_io32.h"
#include "reg_struct.h"
#include "ce_api.h"
#include "ce_main.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "qdf_lock.h"
#include "regtable.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hal_api.h"
#include "pld_common.h"
#include "qdf_module.h"
#include "hif.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers.  When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem -- than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 * Target TX hash result: toeplitz_hash_result
 */
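
/*
 * Illustrative usage sketch (not taken from this file): a rough outline of
 * how a HIF-level caller is expected to drive a CE through the public API
 * declared in ce_api.h. Handle and buffer names below are placeholders and
 * error handling is elided; see ce_api.h for the authoritative prototypes.
 *
 *   // Keep the destination ring stocked with anonymous receive buffers.
 *   ce_recv_buf_enqueue(ce_recv_hdl, (void *)rx_nbuf, rx_paddr);
 *
 *   // Initiate a transfer on the source ring.
 *   ce_send(ce_send_hdl, per_transfer_ctx, tx_paddr, nbytes,
 *           transfer_id, flags, user_flags);
 *
 *   // On completion interrupts, the per-transfer context posted above is
 *   // echoed back through the registered send/recv completion callbacks.
 */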

#define CE_ADDR_COPY(desc, dma_addr) do {\
		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
							  0xFFFFFFFF);\
		(desc)->buffer_addr_hi =\
			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
	} while (0)
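
/*
 * Worked example (derived from the macro above): for a 40-bit IOVA such as
 * dma_addr = 0x1234567890, CE_ADDR_COPY() stores
 * buffer_addr_lo = 0x34567890 and buffer_addr_hi = 0x12; only address
 * bits [39:32] are carried in the hi field.
 */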

void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *CE_state;
	uint32_t hp = 0, tp = 0;

	CE_state = scn->ce_id_to_state[2];
	hal_get_sw_hptp(scn->hal_soc,
			CE_state->status_ring->srng_ctx,
			&tp, &hp);
	hif_info_high("CE-2 Dest status ring current snapshot HP:%u TP:%u",
		      hp, tp);

	hp = 0;
	tp = 0;
	CE_state = scn->ce_id_to_state[3];
	hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx, &tp, &hp);
	hif_info_high("CE-3 Source ring current snapshot HP:%u TP:%u", hp, tp);
}

#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
				   enum hif_ce_event_type type,
				   union ce_srng_desc *descriptor,
				   void *memory, int index,
				   int len, void *hal_ring)
{
	int record_index;
	struct hif_ce_desc_event *event;
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	struct hif_ce_desc_event *hist_ev = NULL;

	if (ce_id < CE_COUNT_MAX)
		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
	else
		return;

	if (!ce_hist->enable[ce_id])
		return;

	if (!hist_ev)
		return;

	record_index = get_next_record_index(
			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);

	event = &hist_ev[record_index];

	hif_clear_ce_desc_debug_data(event);

	event->type = type;
	event->time = qdf_get_log_timestamp();
	event->cpu_id = qdf_get_cpu();

	if (descriptor)
		qdf_mem_copy(&event->descriptor, descriptor,
			     hal_get_entrysize_from_srng(hal_ring));

	if (hal_ring)
		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
				&event->current_hp);

	event->memory = memory;
	event->index = index;

	if (event->type == HIF_CE_SRC_RING_BUFFER_POST)
		hif_ce_desc_record_rx_paddr(scn, event, memory);

	if (ce_hist->data_enable[ce_id])
		hif_ce_desc_data_record(event, len);

	hif_record_latest_evt(ce_hist, type, ce_id, event->time,
			      event->current_hp, event->current_tp);
}
#endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */

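/**
 * ce_send_nolock_srng() - post a single send descriptor to a CE source ring
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context echoed back on send completion
 * @buffer: physical (DMA) address of the buffer to send
 * @nbytes: number of bytes to send
 * @transfer_id: transfer meta data carried in the descriptor
 * @flags: CE_SEND_FLAG_* bits (gather, byte-swap control)
 * @user_flags: not used by the SRNG descriptor format
 *
 * Caller is responsible for any locking; the CE index lock is not taken here.
 *
 * Return: QDF_STATUS_SUCCESS if the descriptor was posted
 */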
static QDF_STATUS
ce_send_nolock_srng(struct CE_handle *copyeng,
			   void *per_transfer_context,
			   qdf_dma_addr_t buffer,
			   uint32_t nbytes,
			   uint32_t transfer_id,
			   uint32_t flags,
			   uint32_t user_flags)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int write_index = src_ring->write_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return QDF_STATUS_E_FAILURE;
	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
					false) <= 0)) {
		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
		Q_TARGET_ACCESS_END(scn);
		return QDF_STATUS_E_FAILURE;
	}
	{
		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
		struct ce_srng_src_desc *src_desc;

		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_FAILURE;
		}

		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
				src_ring->srng_ctx);
		if (!src_desc) {
			Q_TARGET_ACCESS_END(scn);
			return QDF_STATUS_E_INVAL;
		}

		/* Update low 32 bits source descriptor address */
		src_desc->buffer_addr_lo =
			(uint32_t)(dma_addr & 0xFFFFFFFF);
		src_desc->buffer_addr_hi =
			(uint32_t)((dma_addr >> 32) & 0xFF);

		src_desc->meta_data = transfer_id;

		/*
		 * Set the swap bit if:
		 * typical sends on this CE are swapped (host is big-endian)
		 * and this send doesn't disable the swapping
		 * (data is not bytestream)
		 */
		src_desc->byte_swap =
			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
		src_desc->nbytes = nbytes;

		src_ring->per_transfer_context[write_index] =
			per_transfer_context;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);

		/* src_ring->write_index hasn't been updated even though
		 * the register has already been written to.
		 */
		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
					      (union ce_srng_desc *)src_desc,
					      per_transfer_context,
					      src_ring->write_index, nbytes,
					      src_ring->srng_ctx);

		src_ring->write_index = write_index;
		status = QDF_STATUS_SUCCESS;
	}
	Q_TARGET_ACCESS_END(scn);
	return status;
}

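/**
 * ce_sendlist_send_srng() - post a list of buffers as one gather sequence
 * @copyeng: copy engine handle
 * @per_transfer_context: caller context echoed back for the final item
 * @sendlist: list of simple buffers to send
 * @transfer_id: transfer meta data carried in each descriptor
 *
 * All but the last item are posted with CE_SEND_FLAG_GATHER so that the
 * hardware delivers them into a single destination buffer. The entire list
 * must fit in the ring; otherwise nothing is posted and QDF_STATUS_E_NOMEM
 * is returned to the caller.
 *
 * Return: QDF_STATUS_SUCCESS if all items were posted
 */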
static QDF_STATUS
ce_sendlist_send_srng(struct CE_handle *copyeng,
		 void *per_transfer_context,
		 struct ce_sendlist *sendlist, unsigned int transfer_id)
{
	QDF_STATUS status = QDF_STATUS_E_NOMEM;
	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int num_items = sl->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	struct hif_softc *scn = CE_state->scn;

	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
	    num_items) {
		struct ce_sendlist_item *item;
		int i;

		/* handle all but the last item uniformly */
		for (i = 0; i < num_items - 1; i++) {
			item = &sl->item[i];
			/* TBDXXX: Support extensible sendlist_types? */
			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
			status = ce_send_nolock_srng(copyeng,
					CE_SENDLIST_ITEM_CTXT,
				(qdf_dma_addr_t) item->data,
				item->u.nbytes, transfer_id,
				item->flags | CE_SEND_FLAG_GATHER,
				item->user_flags);
			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		}
		/* provide valid context pointer for final item */
		item = &sl->item[i];
		/* TBDXXX: Support extensible sendlist_types? */
		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
		status = ce_send_nolock_srng(copyeng, per_transfer_context,
					(qdf_dma_addr_t) item->data,
					item->u.nbytes,
					transfer_id, item->flags,
					item->user_flags);
		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
					QDF_NBUF_TX_PKT_CE);
		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
			QDF_TRACE_DEFAULT_PDEV_ID,
			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
	} else {
		/*
		 * Probably not worth the additional complexity to support
		 * partial sends with continuation or notification.  We expect
		 * to use large rings and small sendlists. If we can't handle
		 * the entire request at once, punt it back to the caller.
		 */
	}
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	return status;
}

#define SLOTS_PER_DATAPATH_TX 2

#ifndef AH_NEED_TX_DATA_SWAP
#define AH_NEED_TX_DATA_SWAP 0
#endif
/**
 * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
 * @copyeng: copy engine handle
 * @per_recv_context: virtual address of the nbuf
 * @buffer: physical address of the nbuf
 *
 * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
 */
static QDF_STATUS
ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
		    void *per_recv_context, qdf_dma_addr_t buffer)
{
	QDF_STATUS status;
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	uint64_t dma_addr = buffer;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_desc *dest_desc = NULL;

	qdf_spin_lock_bh(&CE_state->ce_index_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_IO;
	}

	/* Any HP/TP update should happen only once per interrupt, hence
	 * the check on the CE receive_count.
	 */
	hal_srng_check_and_update_hptp(scn->hal_soc, dest_ring->srng_ctx,
				       !CE_state->receive_count);

	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
		return QDF_STATUS_E_FAILURE;
	}

	if ((hal_srng_src_num_avail(scn->hal_soc,
					dest_ring->srng_ctx, false) > 0)) {
		dest_desc = hal_srng_src_get_next(scn->hal_soc,
						  dest_ring->srng_ctx);

		if (!dest_desc) {
			status = QDF_STATUS_E_FAILURE;
		} else {

			CE_ADDR_COPY(dest_desc, dma_addr);

			dest_ring->per_transfer_context[write_index] =
				per_recv_context;

			/* Update Destination Ring Write Index */
			write_index = CE_RING_IDX_INCR(nentries_mask,
								write_index);
			status = QDF_STATUS_SUCCESS;
		}
	} else {
		dest_desc = NULL;
		status = QDF_STATUS_E_FAILURE;
	}

	dest_ring->write_index = write_index;
	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_POST,
				      (union ce_srng_desc *)dest_desc,
				      per_recv_context,
				      dest_ring->write_index, 0,
				      dest_ring->srng_ctx);

	Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
	return status;
}

/*
 * Guts of ce_recv_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
			    struct CE_state *CE_state)
{
	struct CE_ring_state *status_ring = CE_state->status_ring;

	return hal_srng_dst_num_valid(scn->hal_soc,
				status_ring->srng_ctx, false);
}

/*
 * Guts of ce_send_entries_done.
 * The caller takes responsibility for any necessary locking.
 */
static unsigned int
ce_send_entries_done_nolock_srng(struct hif_softc *scn,
					struct CE_state *CE_state)
{

	struct CE_ring_state *src_ring = CE_state->src_ring;
	int count = 0;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
		return 0;

	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);

	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return count;
}

/*
 * Guts of ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static QDF_STATUS
ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *flagsp)
{
	QDF_STATUS status;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	struct CE_ring_state *status_ring = CE_state->status_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_dest_status_desc *dest_status = NULL;
	int nbytes;
	struct ce_srng_dest_status_desc dest_status_info;

	/* Any HP/TP update should happen only once per interrupt, hence
	 * the check on the CE receive_count.
	 */
	hal_srng_check_and_update_hptp(scn->hal_soc, status_ring->srng_ctx,
				       !CE_state->receive_count);

	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx))
		return QDF_STATUS_E_FAILURE;

	dest_status = hal_srng_dst_peek(scn->hal_soc, status_ring->srng_ctx);
	if (!dest_status) {
		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
		return QDF_STATUS_E_FAILURE;
	}

	/*
	 * By copying the dest_status descriptor to local memory, we avoid
	 * an extra memory read from non-cacheable memory.
	 */
	dest_status_info = *dest_status;
	nbytes = dest_status_info.nbytes;
	if (nbytes == 0) {
		uint32_t hp, tp;

		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		hal_get_sw_hptp(scn->hal_soc, status_ring->srng_ctx,
				&tp, &hp);
		hif_info_rl("No data to reap, hp %d tp %d", hp, tp);
		status = QDF_STATUS_E_FAILURE;
		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
		goto done;
	}

	/*
	 * Move the tail pointer since nbytes is non-zero and
	 * this entry is processed.
	 */
	hal_srng_dst_get_next(scn->hal_soc, status_ring->srng_ctx);

	dest_status->nbytes = 0;

	*nbytesp = nbytes;
	*transfer_idp = dest_status_info.meta_data;
	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	status = QDF_STATUS_SUCCESS;

	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_RING_BUFFER_REAP,
				      NULL,
				      dest_ring->
				      per_transfer_context[sw_index],
				      dest_ring->sw_index, nbytes,
				      dest_ring->srng_ctx);

done:
	hif_record_ce_srng_desc_event(scn, CE_state->id,
				      HIF_CE_DEST_STATUS_RING_REAP,
				      (union ce_srng_desc *)dest_status,
				      NULL,
				      -1, 0,
				      status_ring->srng_ctx);

	return status;
}

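/**
 * ce_revoke_recv_next_srng() - revoke a previously posted recv buffer
 * @copyeng: copy engine handle
 * @per_CE_contextp: filled with the per-CE recv context, if requested
 * @per_transfer_contextp: filled with the context of the revoked buffer
 * @bufferp: unused in the SRNG implementation
 *
 * Typically used during CE teardown to reclaim receive buffers that were
 * posted to the destination ring but never consumed by hardware.
 *
 * Return: QDF_STATUS_SUCCESS while posted buffers remain,
 *	   QDF_STATUS_E_FAILURE once the ring is drained
 */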
static QDF_STATUS
ce_revoke_recv_next_srng(struct CE_handle *copyeng,
		    void **per_CE_contextp,
		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
{
	struct CE_state *CE_state = (struct CE_state *)copyeng;
	struct CE_ring_state *dest_ring = CE_state->dest_ring;
	unsigned int sw_index;

	if (!dest_ring)
		return QDF_STATUS_E_FAILURE;

	sw_index = dest_ring->sw_index;

	if (per_CE_contextp)
		*per_CE_contextp = CE_state->recv_context;

	/* NOTE: sw_index is more like a read_index in this context. It has a
	 * one-to-one mapping with the status ring.
	 * Get the per transfer context from dest_ring.
	 */
	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	if (!dest_ring->per_transfer_context[sw_index])
		return QDF_STATUS_E_FAILURE;

	/* provide end condition */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;
	return QDF_STATUS_SUCCESS;
}

/*
 * Guts of ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static QDF_STATUS
ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
			      void **per_CE_contextp,
			      void **per_transfer_contextp,
			      qdf_dma_addr_t *bufferp,
			      unsigned int *nbytesp,
			      unsigned int *transfer_idp,
			      unsigned int *sw_idx,
			      unsigned int *hw_idx,
			      uint32_t *toeplitz_hash_result)
{
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring = CE_state->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int swi = src_ring->sw_index;
	struct hif_softc *scn = CE_state->scn;
	struct ce_srng_src_desc *src_desc;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
	if (src_desc) {
		hif_record_ce_srng_desc_event(scn, CE_state->id,
					      HIF_TX_DESC_COMPLETION,
					      (union ce_srng_desc *)src_desc,
					      src_ring->
					      per_transfer_context[swi],
					      swi, src_desc->nbytes,
					      src_ring->srng_ctx);

		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/* NB: Modelled after ce_completed_send_next */
static QDF_STATUS
ce_cancel_send_next_srng(struct CE_handle *copyeng,
		void **per_CE_contextp,
		void **per_transfer_contextp,
		qdf_dma_addr_t *bufferp,
		unsigned int *nbytesp,
		unsigned int *transfer_idp,
		uint32_t *toeplitz_hash_result)
{
	struct CE_state *CE_state;
	QDF_STATUS status = QDF_STATUS_E_FAILURE;
	struct CE_ring_state *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	struct hif_softc *scn;
	struct ce_srng_src_desc *src_desc;

	CE_state = (struct CE_state *)copyeng;
	src_ring = CE_state->src_ring;
	if (!src_ring)
		return QDF_STATUS_E_FAILURE;

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	scn = CE_state->scn;

	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
		status = QDF_STATUS_E_FAILURE;
		return status;
	}

	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
			src_ring->srng_ctx);
	if (src_desc) {
		/* Return data from completed source descriptor */
		*bufferp = (qdf_dma_addr_t)
			(((uint64_t)(src_desc)->buffer_addr_lo +
			  ((uint64_t)((src_desc)->buffer_addr_hi &
				  0xFF) << 32)));
		*nbytesp = src_desc->nbytes;
		*transfer_idp = src_desc->meta_data;
		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/

		if (per_CE_contextp)
			*per_CE_contextp = CE_state->send_context;

		/* sw_index is used more like read index */
		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		status = QDF_STATUS_SUCCESS;
	}
	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);

	return status;
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with target_lock held.
 */
static void
ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
			     int disable_copy_compl_intr)
{
}

static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
					unsigned int *flags)
{
	/*TODO*/
	return false;
}

static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
{
	switch (ring_type) {
	case CE_RING_SRC:
		return sizeof(struct ce_srng_src_desc);
	case CE_RING_DEST:
		return sizeof(struct ce_srng_dest_desc);
	case CE_RING_STATUS:
		return sizeof(struct ce_srng_dest_status_desc);
	default:
		return 0;
	}
	return 0;
}

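/**
 * ce_srng_msi_ring_params_setup() - fill in MSI parameters for a CE srng
 * @scn: hif context
 * @ce_id: copy engine id
 * @ring_params: srng params being populated
 *
 * Queries the platform driver for the "CE" MSI assignment and, if one
 * exists, programs the MSI address/data for this CE and sets
 * HAL_SRNG_MSI_INTR. Leaves @ring_params untouched when no MSI
 * configuration is found.
 */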
static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
			      struct hal_srng_params *ring_params)
{
	uint32_t addr_low;
	uint32_t addr_high;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	int ret;
	int irq_id;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);

	/* msi config not found */
	if (ret)
		return;

	irq_id = scn->int_assignment->msi_idx[ce_id];
	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);

	ring_params->msi_addr = addr_low;
	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
	ring_params->msi_data =  irq_id + msi_data_start;
	ring_params->flags |= HAL_SRNG_MSI_INTR;

	hif_debug("ce_id %d irq_id %d, msi_addr %pK, msi_data %d", ce_id,
		  irq_id, (void *)ring_params->msi_addr, ring_params->msi_data);
}

static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
				   struct CE_ring_state *src_ring,
				   struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	hif_debug("%s: ce_id %d", __func__, ce_id);

	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
	ring_params.num_entries = src_ring->nentries;
	/*
	 * The minimum increment for the timer is 8us
	 * A default value of 0 disables the timer
	 * A valid default value caused continuous interrupts to
	 * fire with MSI enabled. Need to revisit usage of the timer
	 */

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);

		ring_params.intr_timer_thres_us = 0;
		ring_params.intr_batch_cntr_thres_entries = 1;
		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
	}

	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
					    &ring_params, 0);
}

#ifdef WLAN_WAR_CE_DISABLE_SRNG_TIMER_IRQ
static void
ce_srng_initialize_dest_ring_thresh(struct CE_ring_state *dest_ring,
				    struct hal_srng_params *ring_params)
{
	ring_params->low_threshold = dest_ring->nentries >> 3;
	ring_params->intr_timer_thres_us = 0;
	ring_params->intr_batch_cntr_thres_entries = 1;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
#else
static void
ce_srng_initialize_dest_ring_thresh(struct CE_ring_state *dest_ring,
				    struct hal_srng_params *ring_params)
{
	ring_params->low_threshold = dest_ring->nentries >> 3;
	ring_params->intr_timer_thres_us = 100000;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
#endif

#ifdef WLAN_DISABLE_STATUS_RING_TIMER_WAR
static inline bool ce_is_status_ring_timer_thresh_war_needed(void)
{
	return false;
}
#else
static inline bool ce_is_status_ring_timer_thresh_war_needed(void)
{
	return true;
}
#endif

/**
 * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
 * @dest_ring: ring being initialized
 * @ring_params: pointer to initialized parameters
 *
 * For Napier & Hawkeye v1, the status ring timer interrupts do not work.
 * As a workaround, the host configures the destination rings to be a proxy
 * for work needing to be done.
 *
 * The interrupts are set up such that if the destination ring is less than
 * fully posted, there is likely undone work for the status ring that the
 * host should process.
 *
 * There is a timing bug in srng based copy engines such that a fully posted
 * srng based copy engine has 2 empty entries instead of just one.  The copy
 * engine data structures work with 1 empty entry, but the software frequently
 * fails to post the last entry due to the race condition.
 */
static void ce_srng_initialize_dest_timer_interrupt_war(
					struct CE_ring_state *dest_ring,
					struct hal_srng_params *ring_params)
{
	int num_buffers_when_fully_posted = dest_ring->nentries - 2;

	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
	ring_params->intr_timer_thres_us = 1024;
	ring_params->intr_batch_cntr_thres_entries = 0;
	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}

static void ce_srng_dest_ring_setup(struct hif_softc *scn,
				    uint32_t ce_id,
				    struct CE_ring_state *dest_ring,
				    struct CE_attr *attr)
{
	struct hal_srng_params ring_params = {0};

	hif_debug("ce_id: %d", ce_id);

	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
	ring_params.num_entries = dest_ring->nentries;
	ring_params.max_buffer_length = attr->src_sz_max;

	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
		if (ce_is_status_ring_timer_thresh_war_needed()) {
			ce_srng_initialize_dest_timer_interrupt_war(
					dest_ring, &ring_params);
		} else {
			/* normal behavior for future chips */
			ce_srng_initialize_dest_ring_thresh(dest_ring,
							    &ring_params);
		}
		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
	}

	/* Dest ring is also source ring */
	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
					     &ring_params, 0);
}

#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * ce_status_ring_config_int_threshold() - configure ce status ring interrupt
 *                                         thresholds
 * @scn: hif handle
 * @ring_params: ce srng params
 *
 * Return: None
 */
static inline
void ce_status_ring_config_int_threshold(struct hif_softc *scn,
					 struct hal_srng_params *ring_params)
{
	ring_params->intr_timer_thres_us =
			scn->ini_cfg.ce_status_ring_timer_threshold;
	ring_params->intr_batch_cntr_thres_entries =
			scn->ini_cfg.ce_status_ring_batch_count_threshold;
}
#else
static inline
void ce_status_ring_config_int_threshold(struct hif_softc *scn,
					 struct hal_srng_params *ring_params)
{
	ring_params->intr_timer_thres_us = 0x1000;
	ring_params->intr_batch_cntr_thres_entries = 0x1;
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
930*5113495bSYour Name 
ce_srng_status_ring_setup(struct hif_softc * scn,uint32_t ce_id,struct CE_ring_state * status_ring,struct CE_attr * attr)931*5113495bSYour Name static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
932*5113495bSYour Name 				struct CE_ring_state *status_ring,
933*5113495bSYour Name 				struct CE_attr *attr)
934*5113495bSYour Name {
935*5113495bSYour Name 	struct hal_srng_params ring_params = {0};
936*5113495bSYour Name 
937*5113495bSYour Name 	hif_debug("ce_id: %d", ce_id);
938*5113495bSYour Name 
939*5113495bSYour Name 	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
940*5113495bSYour Name 	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
941*5113495bSYour Name 	ring_params.num_entries = status_ring->nentries;
942*5113495bSYour Name 
943*5113495bSYour Name 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
944*5113495bSYour Name 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
945*5113495bSYour Name 		ce_status_ring_config_int_threshold(scn, &ring_params);
946*5113495bSYour Name 	}
947*5113495bSYour Name 
948*5113495bSYour Name 	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
949*5113495bSYour Name 					       ce_id, 0, &ring_params, 0);
950*5113495bSYour Name }
951*5113495bSYour Name 
ce_ring_setup_srng(struct hif_softc * scn,uint8_t ring_type,uint32_t ce_id,struct CE_ring_state * ring,struct CE_attr * attr)952*5113495bSYour Name static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
953*5113495bSYour Name 		uint32_t ce_id, struct CE_ring_state *ring,
954*5113495bSYour Name 		struct CE_attr *attr)
955*5113495bSYour Name {
956*5113495bSYour Name 	switch (ring_type) {
957*5113495bSYour Name 	case CE_RING_SRC:
958*5113495bSYour Name 		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
959*5113495bSYour Name 		break;
960*5113495bSYour Name 	case CE_RING_DEST:
961*5113495bSYour Name 		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
962*5113495bSYour Name 		break;
963*5113495bSYour Name 	case CE_RING_STATUS:
964*5113495bSYour Name 		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
965*5113495bSYour Name 		break;
966*5113495bSYour Name 	default:
967*5113495bSYour Name 		qdf_assert(0);
968*5113495bSYour Name 		break;
969*5113495bSYour Name 	}
970*5113495bSYour Name 
971*5113495bSYour Name 	return 0;
972*5113495bSYour Name }
973*5113495bSYour Name 
ce_ring_cleanup_srng(struct hif_softc * scn,struct CE_state * CE_state,uint8_t ring_type)974*5113495bSYour Name static void ce_ring_cleanup_srng(struct hif_softc *scn,
975*5113495bSYour Name 				 struct CE_state *CE_state,
976*5113495bSYour Name 				 uint8_t ring_type)
977*5113495bSYour Name {
978*5113495bSYour Name 	hal_ring_handle_t hal_srng = NULL;
979*5113495bSYour Name 
980*5113495bSYour Name 	switch (ring_type) {
981*5113495bSYour Name 	case CE_RING_SRC:
982*5113495bSYour Name 		hal_srng = (hal_ring_handle_t)CE_state->src_ring->srng_ctx;
983*5113495bSYour Name 	break;
984*5113495bSYour Name 	case CE_RING_DEST:
985*5113495bSYour Name 		hal_srng = (hal_ring_handle_t)CE_state->dest_ring->srng_ctx;
986*5113495bSYour Name 	break;
987*5113495bSYour Name 	case CE_RING_STATUS:
988*5113495bSYour Name 		hal_srng = (hal_ring_handle_t)CE_state->status_ring->srng_ctx;
989*5113495bSYour Name 	break;
990*5113495bSYour Name 	}
991*5113495bSYour Name 
992*5113495bSYour Name 	if (hal_srng)
993*5113495bSYour Name 		hal_srng_cleanup(scn->hal_soc, hal_srng, 0);
994*5113495bSYour Name }
995*5113495bSYour Name 
ce_construct_shadow_config_srng(struct hif_softc * scn)996*5113495bSYour Name static void ce_construct_shadow_config_srng(struct hif_softc *scn)
997*5113495bSYour Name {
998*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
999*5113495bSYour Name 	int ce_id;
1000*5113495bSYour Name 
1001*5113495bSYour Name 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1002*5113495bSYour Name 		if (hif_state->host_ce_config[ce_id].src_nentries)
1003*5113495bSYour Name 			hal_set_one_shadow_config(scn->hal_soc,
1004*5113495bSYour Name 						  CE_SRC, ce_id);
1005*5113495bSYour Name 
1006*5113495bSYour Name 		if (hif_state->host_ce_config[ce_id].dest_nentries) {
1007*5113495bSYour Name 			hal_set_one_shadow_config(scn->hal_soc,
1008*5113495bSYour Name 						  CE_DST, ce_id);
1009*5113495bSYour Name 
1010*5113495bSYour Name 			hal_set_one_shadow_config(scn->hal_soc,
1011*5113495bSYour Name 						  CE_DST_STATUS, ce_id);
1012*5113495bSYour Name 		}
1013*5113495bSYour Name 	}
1014*5113495bSYour Name }
1015*5113495bSYour Name 
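/**
 * ce_prepare_shadow_register_v2_cfg_srng() - get shadow register v2 config
 * @scn: HIF context
 * @shadow_config: pointer to be filled with the shadow register configuration
 * @num_shadow_registers_configured: pointer to be filled with the number of
 *	configured shadow registers
 *
 * Constructs the SRNG/CE shadow register configuration in the HAL layer if
 * it has not been constructed yet, then returns the resulting configuration.
 *
 * Return: None
 */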
1016*5113495bSYour Name static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
1017*5113495bSYour Name 		struct pld_shadow_reg_v2_cfg **shadow_config,
1018*5113495bSYour Name 		int *num_shadow_registers_configured)
1019*5113495bSYour Name {
1020*5113495bSYour Name 	if (!scn->hal_soc) {
1021*5113495bSYour Name 		hif_err("hal not initialized: not initializing shadow config");
1022*5113495bSYour Name 		return;
1023*5113495bSYour Name 	}
1024*5113495bSYour Name 
1025*5113495bSYour Name 	hal_get_shadow_config(scn->hal_soc, shadow_config,
1026*5113495bSYour Name 			      num_shadow_registers_configured);
1027*5113495bSYour Name 
1028*5113495bSYour Name 	if (*num_shadow_registers_configured != 0) {
1029*5113495bSYour Name 		hif_err("hal shadow register configuration already constructed");
1030*5113495bSYour Name 
1031*5113495bSYour Name 		/* return with original configuration */
1032*5113495bSYour Name 		return;
1033*5113495bSYour Name 	}
1034*5113495bSYour Name 	hal_construct_srng_shadow_regs(scn->hal_soc);
1035*5113495bSYour Name 	ce_construct_shadow_config_srng(scn);
1036*5113495bSYour Name 	hal_set_shadow_regs(scn->hal_soc);
1037*5113495bSYour Name 	hal_construct_shadow_regs(scn->hal_soc);
1038*5113495bSYour Name 	/* get updated configuration */
1039*5113495bSYour Name 	hal_get_shadow_config(scn->hal_soc, shadow_config,
1040*5113495bSYour Name 			      num_shadow_registers_configured);
1041*5113495bSYour Name }
1042*5113495bSYour Name 
1043*5113495bSYour Name #ifdef CONFIG_SHADOW_V3
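/**
 * ce_prepare_shadow_register_v3_cfg_srng() - get shadow register v3 config
 * @scn: HIF context
 * @shadow_config: pointer to be filled with the shadow register v3
 *	configuration
 * @num_shadow_registers_configured: pointer to be filled with the number of
 *	configured shadow registers
 *
 * Same as ce_prepare_shadow_register_v2_cfg_srng(), but for the v3 shadow
 * register configuration format.
 *
 * Return: None
 */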
1044*5113495bSYour Name static void ce_prepare_shadow_register_v3_cfg_srng(struct hif_softc *scn,
1045*5113495bSYour Name 		struct pld_shadow_reg_v3_cfg **shadow_config,
1046*5113495bSYour Name 		int *num_shadow_registers_configured)
1047*5113495bSYour Name {
1048*5113495bSYour Name 	if (!scn->hal_soc) {
1049*5113495bSYour Name 		hif_err("hal not initialized: not initializing shadow config");
1050*5113495bSYour Name 		return;
1051*5113495bSYour Name 	}
1052*5113495bSYour Name 
1053*5113495bSYour Name 	hal_get_shadow_v3_config(scn->hal_soc, shadow_config,
1054*5113495bSYour Name 				 num_shadow_registers_configured);
1055*5113495bSYour Name 
1056*5113495bSYour Name 	if (*num_shadow_registers_configured != 0) {
1057*5113495bSYour Name 		hif_err("hal shadow register configuration already constructed");
1058*5113495bSYour Name 
1059*5113495bSYour Name 		/* return with original configuration */
1060*5113495bSYour Name 		return;
1061*5113495bSYour Name 	}
1062*5113495bSYour Name 	hal_construct_srng_shadow_regs(scn->hal_soc);
1063*5113495bSYour Name 	ce_construct_shadow_config_srng(scn);
1064*5113495bSYour Name 	hal_set_shadow_regs(scn->hal_soc);
1065*5113495bSYour Name 	hal_construct_shadow_regs(scn->hal_soc);
1066*5113495bSYour Name 	/* get updated configuration */
1067*5113495bSYour Name 	hal_get_shadow_v3_config(scn->hal_soc, shadow_config,
1068*5113495bSYour Name 				 num_shadow_registers_configured);
1069*5113495bSYour Name }
1070*5113495bSYour Name #endif
1071*5113495bSYour Name 
1072*5113495bSYour Name #ifdef HIF_CE_LOG_INFO
1073*5113495bSYour Name /**
1074*5113495bSYour Name  * ce_get_index_info_srng(): Get CE index info
1075*5113495bSYour Name  * @scn: HIF Context
1076*5113495bSYour Name  * @ce_state: CE opaque handle
1077*5113495bSYour Name  * @info: CE info
1078*5113495bSYour Name  *
1079*5113495bSYour Name  * Return: 0 for success and non zero for failure
1080*5113495bSYour Name  */
1081*5113495bSYour Name static
1082*5113495bSYour Name int ce_get_index_info_srng(struct hif_softc *scn, void *ce_state,
1083*5113495bSYour Name 			   struct ce_index *info)
1084*5113495bSYour Name {
1085*5113495bSYour Name 	struct CE_state *CE_state = (struct CE_state *)ce_state;
1086*5113495bSYour Name 	uint32_t tp, hp;
1087*5113495bSYour Name 
1088*5113495bSYour Name 	info->id = CE_state->id;
1089*5113495bSYour Name 	if (CE_state->src_ring) {
1090*5113495bSYour Name 		hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx,
1091*5113495bSYour Name 				&tp, &hp);
1092*5113495bSYour Name 		info->u.srng_info.tp = tp;
1093*5113495bSYour Name 		info->u.srng_info.hp = hp;
1094*5113495bSYour Name 	} else if (CE_state->dest_ring && CE_state->status_ring) {
1095*5113495bSYour Name 		hal_get_sw_hptp(scn->hal_soc, CE_state->status_ring->srng_ctx,
1096*5113495bSYour Name 				&tp, &hp);
1097*5113495bSYour Name 		info->u.srng_info.status_tp = tp;
1098*5113495bSYour Name 		info->u.srng_info.status_hp = hp;
1099*5113495bSYour Name 		hal_get_sw_hptp(scn->hal_soc, CE_state->dest_ring->srng_ctx,
1100*5113495bSYour Name 				&tp, &hp);
1101*5113495bSYour Name 		info->u.srng_info.tp = tp;
1102*5113495bSYour Name 		info->u.srng_info.hp = hp;
1103*5113495bSYour Name 	}
1104*5113495bSYour Name 
1105*5113495bSYour Name 	return 0;
1106*5113495bSYour Name }
1107*5113495bSYour Name #endif
1108*5113495bSYour Name 
1109*5113495bSYour Name #ifdef FEATURE_DIRECT_LINK
1110*5113495bSYour Name /**
1111*5113495bSYour Name  * ce_set_srng_msi_irq_config_by_ceid(): Set srng MSI irq configuration for CE
1112*5113495bSYour Name  *  given by id
1113*5113495bSYour Name  * @scn: HIF Context
1114*5113495bSYour Name  * @ce_id: CE id
1115*5113495bSYour Name  * @addr: MSI address for the CE ring interrupt
1116*5113495bSYour Name  * @data: MSI data for the CE ring interrupt
1117*5113495bSYour Name  *
1118*5113495bSYour Name  * Return: QDF_STATUS_SUCCESS on success, QDF error status on failure
1119*5113495bSYour Name  */
1120*5113495bSYour Name static QDF_STATUS
1121*5113495bSYour Name ce_set_srng_msi_irq_config_by_ceid(struct hif_softc *scn, uint8_t ce_id,
1122*5113495bSYour Name 				   uint64_t addr, uint32_t data)
1123*5113495bSYour Name {
1124*5113495bSYour Name 	struct CE_state *ce_state;
1125*5113495bSYour Name 	hal_ring_handle_t ring_hdl;
1126*5113495bSYour Name 	struct hal_srng_params ring_params = {0};
1127*5113495bSYour Name 
1128*5113495bSYour Name 	ce_state = scn->ce_id_to_state[ce_id];
1129*5113495bSYour Name 	if (!ce_state)
1130*5113495bSYour Name 		return QDF_STATUS_E_NOSUPPORT;
1131*5113495bSYour Name 
1132*5113495bSYour Name 	ring_params.msi_addr = addr;
1133*5113495bSYour Name 	ring_params.msi_data = data;
1134*5113495bSYour Name 
1135*5113495bSYour Name 	if (ce_state->src_ring) {
1136*5113495bSYour Name 		ring_hdl = ce_state->src_ring->srng_ctx;
1137*5113495bSYour Name 
1138*5113495bSYour Name 		ring_params.intr_timer_thres_us = 0;
1139*5113495bSYour Name 		ring_params.intr_batch_cntr_thres_entries = 1;
1140*5113495bSYour Name 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
1141*5113495bSYour Name 	} else if (ce_state->dest_ring) {
1142*5113495bSYour Name 		ring_hdl = ce_state->status_ring->srng_ctx;
1143*5113495bSYour Name 
1144*5113495bSYour Name 		ce_status_ring_config_int_threshold(scn, &ring_params);
1145*5113495bSYour Name 
1146*5113495bSYour Name 		hal_srng_set_msi_irq_config(scn->hal_soc, ring_hdl,
1147*5113495bSYour Name 					    &ring_params);
1148*5113495bSYour Name 
1149*5113495bSYour Name 		if (ce_is_status_ring_timer_thresh_war_needed()) {
1150*5113495bSYour Name 			ce_srng_initialize_dest_timer_interrupt_war(
1151*5113495bSYour Name 					ce_state->dest_ring, &ring_params);
1152*5113495bSYour Name 		} else {
1153*5113495bSYour Name 			ce_srng_initialize_dest_ring_thresh(ce_state->dest_ring,
1154*5113495bSYour Name 							    &ring_params);
1155*5113495bSYour Name 		}
1156*5113495bSYour Name 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
1157*5113495bSYour Name 		ring_hdl = ce_state->dest_ring->srng_ctx;
1158*5113495bSYour Name 	} else {
1159*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1160*5113495bSYour Name 	}
1161*5113495bSYour Name 
1162*5113495bSYour Name 	hal_srng_set_msi_irq_config(scn->hal_soc, ring_hdl, &ring_params);
1163*5113495bSYour Name 
1164*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1165*5113495bSYour Name }
1166*5113495bSYour Name 
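/**
 * ce_get_direct_link_dest_srng_buffers() - get Direct Link RX buffer info
 * @scn: HIF context
 * @dma_addr: to be filled with a newly allocated array of RX buffer DMA
 *	addresses; the caller is responsible for freeing it
 * @buf_size: to be filled with the maximum RX buffer size
 *
 * Return: number of RX buffer addresses filled, 0 on failure
 */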
1167*5113495bSYour Name static
1168*5113495bSYour Name uint16_t ce_get_direct_link_dest_srng_buffers(struct hif_softc *scn,
1169*5113495bSYour Name 					      uint64_t **dma_addr,
1170*5113495bSYour Name 					      uint32_t *buf_size)
1171*5113495bSYour Name {
1172*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1173*5113495bSYour Name 	struct CE_state *ce_state;
1174*5113495bSYour Name 	struct service_to_pipe *tgt_svc_cfg;
1175*5113495bSYour Name 	uint64_t *dma_addr_arr = NULL;
1176*5113495bSYour Name 	uint32_t i;
1177*5113495bSYour Name 	uint32_t j = 0;
1178*5113495bSYour Name 
1179*5113495bSYour Name 	tgt_svc_cfg = hif_state->tgt_svc_map;
1180*5113495bSYour Name 
1181*5113495bSYour Name 	for (i = 0; i < hif_state->sz_tgt_svc_map; i++) {
1182*5113495bSYour Name 		if (tgt_svc_cfg[i].service_id != LPASS_DATA_MSG_SVC ||
1183*5113495bSYour Name 		    tgt_svc_cfg[i].pipedir != PIPEDIR_IN)
1184*5113495bSYour Name 			continue;
1185*5113495bSYour Name 
1186*5113495bSYour Name 		ce_state = scn->ce_id_to_state[tgt_svc_cfg[i].pipenum];
1187*5113495bSYour Name 		if (!ce_state || !ce_state->dest_ring) {
1188*5113495bSYour Name 			hif_err("Direct Link CE pipe %d not initialized",
1189*5113495bSYour Name 				tgt_svc_cfg[i].pipenum);
1190*5113495bSYour Name 			/* function returns a buffer count, so 0 on error */
1190*5113495bSYour Name 			return 0;
1191*5113495bSYour Name 		}
1192*5113495bSYour Name 
1193*5113495bSYour Name 		QDF_ASSERT(scn->dl_recv_pages.dma_pages);
1194*5113495bSYour Name 
1195*5113495bSYour Name 		dma_addr_arr = qdf_mem_malloc(sizeof(*dma_addr_arr) *
1196*5113495bSYour Name 					      scn->dl_recv_pages.num_pages);
1197*5113495bSYour Name 		if (!dma_addr_arr)
1198*5113495bSYour Name 			return 0;
1199*5113495bSYour Name 
1200*5113495bSYour Name 		for (j = 0; j < scn->dl_recv_pages.num_pages; j++)
1201*5113495bSYour Name 			dma_addr_arr[j] =
1202*5113495bSYour Name 				scn->dl_recv_pages.dma_pages[j].page_p_addr;
1203*5113495bSYour Name 
1204*5113495bSYour Name 		*buf_size = ce_state->src_sz_max;
1205*5113495bSYour Name 
1206*5113495bSYour Name 		break;
1207*5113495bSYour Name 	}
1208*5113495bSYour Name 
1209*5113495bSYour Name 	*dma_addr = dma_addr_arr;
1210*5113495bSYour Name 
1211*5113495bSYour Name 	return j;
1212*5113495bSYour Name }
1213*5113495bSYour Name 
1214*5113495bSYour Name /**
1215*5113495bSYour Name  * ce_save_srng_info() - Get and save srng information
1216*5113495bSYour Name  * @hif_ctx: hif context
1217*5113495bSYour Name  * @srng_info: Direct Link CE srng information
1218*5113495bSYour Name  * @srng_ctx: Direct Link CE srng context
1219*5113495bSYour Name  *
1220*5113495bSYour Name  * Return: None
1221*5113495bSYour Name  */
1222*5113495bSYour Name static void
1223*5113495bSYour Name ce_save_srng_info(struct hif_softc *hif_ctx, struct hif_ce_ring_info *srng_info,
1224*5113495bSYour Name 		  void *srng_ctx)
1225*5113495bSYour Name {
1226*5113495bSYour Name 	struct hal_srng_params params;
1227*5113495bSYour Name 
1228*5113495bSYour Name 	hal_get_srng_params(hif_ctx->hal_soc, srng_ctx, &params);
1229*5113495bSYour Name 
1230*5113495bSYour Name 	srng_info->ring_id = params.ring_id;
1231*5113495bSYour Name 	srng_info->ring_dir = params.ring_dir;
1232*5113495bSYour Name 	srng_info->num_entries = params.num_entries;
1233*5113495bSYour Name 	srng_info->entry_size = params.entry_size;
1234*5113495bSYour Name 	srng_info->ring_base_paddr = params.ring_base_paddr;
1235*5113495bSYour Name 	srng_info->hp_paddr =
1236*5113495bSYour Name 		      hal_srng_get_hp_addr(hif_ctx->hal_soc, srng_ctx);
1237*5113495bSYour Name 	srng_info->tp_paddr =
1238*5113495bSYour Name 		      hal_srng_get_tp_addr(hif_ctx->hal_soc, srng_ctx);
1239*5113495bSYour Name }
1240*5113495bSYour Name 
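/**
 * ce_get_direct_link_srng_info() - get SRNG information for Direct Link CEs
 * @scn: HIF context
 * @info: array to be filled with Direct Link CE ring information
 * @max_ce_info_len: number of entries available in @info
 *
 * Return: QDF status
 */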
1241*5113495bSYour Name static
1242*5113495bSYour Name QDF_STATUS ce_get_direct_link_srng_info(struct hif_softc *scn,
1243*5113495bSYour Name 					struct hif_direct_link_ce_info *info,
1244*5113495bSYour Name 					uint8_t max_ce_info_len)
1245*5113495bSYour Name {
1246*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1247*5113495bSYour Name 	struct CE_state *ce_state;
1248*5113495bSYour Name 	struct service_to_pipe *tgt_svc_cfg;
1249*5113495bSYour Name 	uint8_t ce_info_idx = 0;
1250*5113495bSYour Name 	uint32_t i;
1251*5113495bSYour Name 
1252*5113495bSYour Name 	tgt_svc_cfg = hif_state->tgt_svc_map;
1253*5113495bSYour Name 
1254*5113495bSYour Name 	for (i = 0; i < hif_state->sz_tgt_svc_map; i++) {
1255*5113495bSYour Name 		if (tgt_svc_cfg[i].service_id != LPASS_DATA_MSG_SVC)
1256*5113495bSYour Name 			continue;
1257*5113495bSYour Name 
1258*5113495bSYour Name 		ce_state = scn->ce_id_to_state[tgt_svc_cfg[i].pipenum];
1259*5113495bSYour Name 		if (!ce_state) {
1260*5113495bSYour Name 			hif_err("Direct Link CE pipe %d not initialized",
1261*5113495bSYour Name 				tgt_svc_cfg[i].pipenum);
1262*5113495bSYour Name 			return QDF_STATUS_E_FAILURE;
1263*5113495bSYour Name 		}
1264*5113495bSYour Name 
1265*5113495bSYour Name 		if (ce_info_idx >= max_ce_info_len)
1266*5113495bSYour Name 			return QDF_STATUS_E_FAILURE;
1267*5113495bSYour Name 
1268*5113495bSYour Name 		info[ce_info_idx].ce_id = ce_state->id;
1269*5113495bSYour Name 		info[ce_info_idx].pipe_dir = tgt_svc_cfg[i].pipedir;
1270*5113495bSYour Name 
1271*5113495bSYour Name 		if (ce_state->src_ring)
1272*5113495bSYour Name 			ce_save_srng_info(scn, &info[ce_info_idx].ring_info,
1273*5113495bSYour Name 					  ce_state->src_ring->srng_ctx);
1274*5113495bSYour Name 		else
1275*5113495bSYour Name 			ce_save_srng_info(scn, &info[ce_info_idx].ring_info,
1276*5113495bSYour Name 					  ce_state->dest_ring->srng_ctx);
1277*5113495bSYour Name 
1278*5113495bSYour Name 		ce_info_idx++;
1279*5113495bSYour Name 
1280*5113495bSYour Name 		if (!ce_state->status_ring)
1281*5113495bSYour Name 			continue;
1282*5113495bSYour Name 
1283*5113495bSYour Name 		if (ce_info_idx >= max_ce_info_len)
1284*5113495bSYour Name 			return QDF_STATUS_E_FAILURE;
1285*5113495bSYour Name 
1286*5113495bSYour Name 		info[ce_info_idx].ce_id = ce_state->id;
1287*5113495bSYour Name 		info[ce_info_idx].pipe_dir = tgt_svc_cfg[i].pipedir;
1288*5113495bSYour Name 
1289*5113495bSYour Name 		ce_save_srng_info(scn, &info[ce_info_idx].ring_info,
1290*5113495bSYour Name 				  ce_state->status_ring->srng_ctx);
1291*5113495bSYour Name 		ce_info_idx++;
1292*5113495bSYour Name 	}
1293*5113495bSYour Name 
1294*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1295*5113495bSYour Name }
1296*5113495bSYour Name #endif
1297*5113495bSYour Name 
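/* CE service callbacks used when the copy engines are backed by HAL SRNG */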
1298*5113495bSYour Name static struct ce_ops ce_service_srng = {
1299*5113495bSYour Name 	.ce_get_desc_size = ce_get_desc_size_srng,
1300*5113495bSYour Name 	.ce_ring_setup = ce_ring_setup_srng,
1301*5113495bSYour Name 	.ce_srng_cleanup = ce_ring_cleanup_srng,
1302*5113495bSYour Name 	.ce_sendlist_send = ce_sendlist_send_srng,
1303*5113495bSYour Name 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
1304*5113495bSYour Name 	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
1305*5113495bSYour Name 	.ce_cancel_send_next = ce_cancel_send_next_srng,
1306*5113495bSYour Name 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
1307*5113495bSYour Name 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
1308*5113495bSYour Name 	.ce_send_nolock = ce_send_nolock_srng,
1309*5113495bSYour Name 	.watermark_int = ce_check_int_watermark_srng,
1310*5113495bSYour Name 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
1311*5113495bSYour Name 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
1312*5113495bSYour Name 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
1313*5113495bSYour Name 	.ce_prepare_shadow_register_v2_cfg =
1314*5113495bSYour Name 		ce_prepare_shadow_register_v2_cfg_srng,
1315*5113495bSYour Name #ifdef CONFIG_SHADOW_V3
1316*5113495bSYour Name 	.ce_prepare_shadow_register_v3_cfg =
1317*5113495bSYour Name 		ce_prepare_shadow_register_v3_cfg_srng,
1318*5113495bSYour Name #endif
1319*5113495bSYour Name #ifdef HIF_CE_LOG_INFO
1320*5113495bSYour Name 	.ce_get_index_info =
1321*5113495bSYour Name 		ce_get_index_info_srng,
1322*5113495bSYour Name #endif
1323*5113495bSYour Name #ifdef FEATURE_DIRECT_LINK
1324*5113495bSYour Name 	.ce_set_irq_config_by_ceid = ce_set_srng_msi_irq_config_by_ceid,
1325*5113495bSYour Name 	.ce_get_direct_link_dest_buffers = ce_get_direct_link_dest_srng_buffers,
1326*5113495bSYour Name 	.ce_get_direct_link_ring_info = ce_get_direct_link_srng_info,
1327*5113495bSYour Name #endif
1328*5113495bSYour Name };
1329*5113495bSYour Name 
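/**
 * ce_services_srng() - get the SRNG-based CE service callback table
 *
 * Illustrative usage sketch (variable names here are hypothetical):
 *
 *	struct ce_ops *ops = ce_services_srng();
 *
 *	ops->ce_ring_setup(scn, CE_RING_SRC, ce_id, ring, attr);
 *
 * Return: pointer to the struct ce_ops for SRNG-based copy engines
 */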
1330*5113495bSYour Name struct ce_ops *ce_services_srng(void)
1331*5113495bSYour Name {
1332*5113495bSYour Name 	return &ce_service_srng;
1333*5113495bSYour Name }
1334*5113495bSYour Name qdf_export_symbol(ce_services_srng);
1335*5113495bSYour Name 
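/**
 * ce_service_srng_init() - register the SRNG CE service with the CE layer
 *
 * Return: None
 */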
1336*5113495bSYour Name void ce_service_srng_init(void)
1337*5113495bSYour Name {
1338*5113495bSYour Name 	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
1339*5113495bSYour Name }
1340