xref: /wlan-driver/qca-wifi-host-cmn/hif/src/ce/ce_service_srng.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 #include "hif_io32.h"
20 #include "reg_struct.h"
21 #include "ce_api.h"
22 #include "ce_main.h"
23 #include "ce_internal.h"
24 #include "ce_reg.h"
25 #include "qdf_lock.h"
26 #include "regtable.h"
27 #include "hif_main.h"
28 #include "hif_debug.h"
29 #include "hal_api.h"
30 #include "pld_common.h"
31 #include "qdf_module.h"
32 #include "hif.h"
33 
34 /*
35  * Support for Copy Engine hardware, which is mainly used for
36  * communication between Host and Target over a PCIe interconnect.
37  */
38 
39 /*
40  * A single CopyEngine (CE) comprises two "rings":
41  *   a source ring
42  *   a destination ring
43  *
44  * Each ring consists of a number of descriptors which specify
45  * an address, length, and meta-data.
46  *
47  * Typically, one side of the PCIe interconnect (Host or Target)
48  * controls one ring and the other side controls the other ring.
49  * The source side chooses when to initiate a transfer and it
50  * chooses what to send (buffer address, length). The destination
51  * side keeps a supply of "anonymous receive buffers" available and
52  * it handles incoming data as it arrives (when the destination
53  * receives an interrupt).
54  *
55  * The sender may send a simple buffer (address/length) or it may
56  * send a small list of buffers.  When a small list is sent, hardware
57  * "gathers" these and they end up in a single destination buffer
58  * with a single interrupt.
59  *
60  * There are several "contexts" managed by this layer -- more, it
61  * may seem, than should be needed. These are provided mainly for
62  * maximum flexibility and especially to facilitate a simpler HIF
63  * implementation. There are per-CopyEngine recv, send, and watermark
64  * contexts. These are supplied by the caller when a recv, send,
65  * or watermark handler is established and they are echoed back to
66  * the caller when the respective callbacks are invoked. There is
67  * also a per-transfer context supplied by the caller when a buffer
68  * (or sendlist) is sent and when a buffer is enqueued for recv.
69  * These per-transfer contexts are echoed back to the caller when
70  * the buffer is sent/received.
71  * Target TX hash result: toeplitz_hash_result
72  */
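/*
 * Illustrative sketch (not driver code) of how a per-transfer context is
 * threaded through a send and echoed back on completion.  The static
 * handlers below are normally reached through the generic ce_api.h entry
 * points; variable names here are hypothetical.
 *
 *	// post: the nbuf pointer doubles as the per-transfer context
 *	status = ce_send_nolock_srng(ce_hdl, (void *)nbuf, paddr, len,
 *				     transfer_id, flags, user_flags);
 *
 *	// completion path: the same pointer comes back with the reaped desc
 *	while (ce_completed_send_next_nolock_srng(CE_state, &ce_ctx, &ctx,
 *						  &paddr, &len, &id, &sw_idx,
 *						  &hw_idx, &hash) ==
 *	       QDF_STATUS_SUCCESS)
 *		complete_tx((qdf_nbuf_t)ctx);	// hypothetical helper
 */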
73 
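/*
 * CE_ADDR_COPY() splits a DMA address into the descriptor's
 * buffer_addr_lo/hi fields: the low 32 bits go into buffer_addr_lo and only
 * bits [39:32] are kept in buffer_addr_hi, matching the 40-bit buffer
 * addressing implied by the 0xFF masks used on buffer_addr_hi throughout
 * this file.
 */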
74 #define CE_ADDR_COPY(desc, dma_addr) do {\
75 		(desc)->buffer_addr_lo = (uint32_t)((dma_addr) &\
76 							  0xFFFFFFFF);\
77 		(desc)->buffer_addr_hi =\
78 			(uint32_t)(((dma_addr) >> 32) & 0xFF);\
79 	} while (0)
80 
81 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx)
82 {
83 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
84 	struct CE_state *CE_state;
85 	uint32_t hp = 0, tp = 0;
86 
87 	CE_state = scn->ce_id_to_state[2];
88 	hal_get_sw_hptp(scn->hal_soc,
89 			CE_state->status_ring->srng_ctx,
90 			&tp, &hp);
91 	hif_info_high("CE-2 Dest status ring current snapshot HP:%u TP:%u",
92 		      hp, tp);
93 
94 	hp = 0;
95 	tp = 0;
96 	CE_state = scn->ce_id_to_state[3];
97 	hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx, &tp, &hp);
98 	hif_info_high("CE-3 Source ring current snapshot HP:%u TP:%u", hp, tp);
99 }
100 
101 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
102 void hif_record_ce_srng_desc_event(struct hif_softc *scn, int ce_id,
103 				   enum hif_ce_event_type type,
104 				   union ce_srng_desc *descriptor,
105 				   void *memory, int index,
106 				   int len, void *hal_ring)
107 {
108 	int record_index;
109 	struct hif_ce_desc_event *event;
110 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
111 	struct hif_ce_desc_event *hist_ev = NULL;
112 
113 	if (ce_id < CE_COUNT_MAX)
114 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
115 	else
116 		return;
117 
121 	if (!ce_hist->enable[ce_id])
122 		return;
123 
124 	if (!hist_ev)
125 		return;
126 
127 	record_index = get_next_record_index(
128 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
129 
130 	event = &hist_ev[record_index];
131 
132 	hif_clear_ce_desc_debug_data(event);
133 
134 	event->type = type;
135 	event->time = qdf_get_log_timestamp();
136 	event->cpu_id = qdf_get_cpu();
137 
138 	if (descriptor)
139 		qdf_mem_copy(&event->descriptor, descriptor,
140 			     hal_get_entrysize_from_srng(hal_ring));
141 
142 	if (hal_ring)
143 		hal_get_sw_hptp(scn->hal_soc, hal_ring, &event->current_tp,
144 				&event->current_hp);
145 
146 	event->memory = memory;
147 	event->index = index;
148 
149 	if (event->type == HIF_CE_SRC_RING_BUFFER_POST)
150 		hif_ce_desc_record_rx_paddr(scn, event, memory);
151 
152 	if (ce_hist->data_enable[ce_id])
153 		hif_ce_desc_data_record(event, len);
154 
155 	hif_record_latest_evt(ce_hist, type, ce_id, event->time,
156 			      event->current_hp, event->current_tp);
157 }
158 #endif /* HIF_CONFIG_SLUB_DEBUG_ON || HIF_CE_DEBUG_DATA_BUF */
159 
160 static QDF_STATUS
161 ce_send_nolock_srng(struct CE_handle *copyeng,
162 			   void *per_transfer_context,
163 			   qdf_dma_addr_t buffer,
164 			   uint32_t nbytes,
165 			   uint32_t transfer_id,
166 			   uint32_t flags,
167 			   uint32_t user_flags)
168 {
169 	QDF_STATUS status;
170 	struct CE_state *CE_state = (struct CE_state *)copyeng;
171 	struct CE_ring_state *src_ring = CE_state->src_ring;
172 	unsigned int nentries_mask = src_ring->nentries_mask;
173 	unsigned int write_index = src_ring->write_index;
174 	uint64_t dma_addr = buffer;
175 	struct hif_softc *scn = CE_state->scn;
176 
177 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
178 		return QDF_STATUS_E_FAILURE;
179 	if (unlikely(hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx,
180 					false) <= 0)) {
181 		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
182 		Q_TARGET_ACCESS_END(scn);
183 		return QDF_STATUS_E_FAILURE;
184 	}
185 	{
186 		enum hif_ce_event_type event_type = HIF_CE_SRC_RING_BUFFER_POST;
187 		struct ce_srng_src_desc *src_desc;
188 
189 		if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
190 			Q_TARGET_ACCESS_END(scn);
191 			return QDF_STATUS_E_FAILURE;
192 		}
193 
194 		src_desc = hal_srng_src_get_next_reaped(scn->hal_soc,
195 				src_ring->srng_ctx);
196 		if (!src_desc) {
197 			Q_TARGET_ACCESS_END(scn);
198 			return QDF_STATUS_E_INVAL;
199 		}
200 
201 		/* Update low 32 bits source descriptor address */
202 		src_desc->buffer_addr_lo =
203 			(uint32_t)(dma_addr & 0xFFFFFFFF);
204 		src_desc->buffer_addr_hi =
205 			(uint32_t)((dma_addr >> 32) & 0xFF);
206 
207 		src_desc->meta_data = transfer_id;
208 
209 		/*
210 		 * Set the swap bit if:
211 		 * typical sends on this CE are swapped (host is big-endian)
212 		 * and this send doesn't disable the swapping
213 		 * (data is not bytestream)
214 		 */
215 		src_desc->byte_swap =
216 			(((CE_state->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
217 			  != 0) & ((flags & CE_SEND_FLAG_SWAP_DISABLE) == 0));
218 		src_desc->gather = ((flags & CE_SEND_FLAG_GATHER) != 0);
219 		src_desc->nbytes = nbytes;
220 
221 		src_ring->per_transfer_context[write_index] =
222 			per_transfer_context;
223 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
224 
225 		hal_srng_access_end(scn->hal_soc, src_ring->srng_ctx);
226 
227 		/* src_ring->write_index hasn't been updated even though
228 		 * the register has already been written to.
229 		 */
230 		hif_record_ce_srng_desc_event(scn, CE_state->id, event_type,
231 					      (union ce_srng_desc *)src_desc,
232 					      per_transfer_context,
233 					      src_ring->write_index, nbytes,
234 					      src_ring->srng_ctx);
235 
236 		src_ring->write_index = write_index;
237 		status = QDF_STATUS_SUCCESS;
238 	}
239 	Q_TARGET_ACCESS_END(scn);
240 	return status;
241 }
242 
243 static QDF_STATUS
244 ce_sendlist_send_srng(struct CE_handle *copyeng,
245 		 void *per_transfer_context,
246 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
247 {
248 	QDF_STATUS status = QDF_STATUS_E_NOMEM;
249 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
250 	struct CE_state *CE_state = (struct CE_state *)copyeng;
251 	struct CE_ring_state *src_ring = CE_state->src_ring;
252 	unsigned int num_items = sl->num_items;
253 	unsigned int sw_index;
254 	unsigned int write_index;
255 	struct hif_softc *scn = CE_state->scn;
256 
257 	QDF_ASSERT((num_items > 0) && (num_items < src_ring->nentries));
258 
259 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
260 	sw_index = src_ring->sw_index;
261 	write_index = src_ring->write_index;
262 
263 	if (hal_srng_src_num_avail(scn->hal_soc, src_ring->srng_ctx, false) >=
264 	    num_items) {
265 		struct ce_sendlist_item *item;
266 		int i;
267 
268 		/* handle all but the last item uniformly */
269 		for (i = 0; i < num_items - 1; i++) {
270 			item = &sl->item[i];
271 			/* TBDXXX: Support extensible sendlist_types? */
272 			QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
273 			status = ce_send_nolock_srng(copyeng,
274 					CE_SENDLIST_ITEM_CTXT,
275 				(qdf_dma_addr_t) item->data,
276 				item->u.nbytes, transfer_id,
277 				item->flags | CE_SEND_FLAG_GATHER,
278 				item->user_flags);
279 			QDF_ASSERT(status == QDF_STATUS_SUCCESS);
280 		}
281 		/* provide valid context pointer for final item */
282 		item = &sl->item[i];
283 		/* TBDXXX: Support extensible sendlist_types? */
284 		QDF_ASSERT(item->send_type == CE_SIMPLE_BUFFER_TYPE);
285 		status = ce_send_nolock_srng(copyeng, per_transfer_context,
286 					(qdf_dma_addr_t) item->data,
287 					item->u.nbytes,
288 					transfer_id, item->flags,
289 					item->user_flags);
290 		QDF_ASSERT(status == QDF_STATUS_SUCCESS);
291 		QDF_NBUF_UPDATE_TX_PKT_COUNT((qdf_nbuf_t)per_transfer_context,
292 					QDF_NBUF_TX_PKT_CE);
293 		DPTRACE(qdf_dp_trace((qdf_nbuf_t)per_transfer_context,
294 			QDF_DP_TRACE_CE_PACKET_PTR_RECORD,
295 			QDF_TRACE_DEFAULT_PDEV_ID,
296 			(uint8_t *)(((qdf_nbuf_t)per_transfer_context)->data),
297 			sizeof(((qdf_nbuf_t)per_transfer_context)->data), QDF_TX));
298 	} else {
299 		/*
300 		 * Probably not worth the additional complexity to support
301 		 * partial sends with continuation or notification.  We expect
302 		 * to use large rings and small sendlists. If we can't handle
303 		 * the entire request at once, punt it back to the caller.
304 		 */
305 	}
306 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
307 
308 	return status;
309 }
310 
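/*
 * Number of source-ring descriptors a single datapath TX is assumed to
 * consume (a metadata fragment plus the frame payload, posted as a gather
 * pair).
 */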
311 #define SLOTS_PER_DATAPATH_TX 2
312 
313 #ifndef AH_NEED_TX_DATA_SWAP
314 #define AH_NEED_TX_DATA_SWAP 0
315 #endif
316 /**
317  * ce_recv_buf_enqueue_srng() - enqueue a recv buffer into a copy engine
318  * @copyeng: copy engine handle
319  * @per_recv_context: virtual address of the nbuf
320  * @buffer: physical address of the nbuf
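 * On SRNG targets the CE destination ring is host-producer owned, which is
 * why source-ring HAL calls (hal_srng_src_get_next() etc.) are used below
 * to post the receive buffer.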
321  *
322  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
323  */
324 static QDF_STATUS
325 ce_recv_buf_enqueue_srng(struct CE_handle *copyeng,
326 		    void *per_recv_context, qdf_dma_addr_t buffer)
327 {
328 	QDF_STATUS status;
329 	struct CE_state *CE_state = (struct CE_state *)copyeng;
330 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
331 	unsigned int nentries_mask = dest_ring->nentries_mask;
332 	unsigned int write_index;
333 	unsigned int sw_index;
334 	uint64_t dma_addr = buffer;
335 	struct hif_softc *scn = CE_state->scn;
336 	struct ce_srng_dest_desc *dest_desc = NULL;
337 
338 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
339 	write_index = dest_ring->write_index;
340 	sw_index = dest_ring->sw_index;
341 
342 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
343 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
344 		return QDF_STATUS_E_IO;
345 	}
346 
347 	/* HP/TP update if any should happen only once per interrupt,
348 	 * therefore checking for CE receive_count.
349 	 */
350 	hal_srng_check_and_update_hptp(scn->hal_soc, dest_ring->srng_ctx,
351 				       !CE_state->receive_count);
352 
353 	if (hal_srng_access_start(scn->hal_soc, dest_ring->srng_ctx)) {
354 		qdf_spin_unlock_bh(&CE_state->ce_index_lock);
355 		return QDF_STATUS_E_FAILURE;
356 	}
357 
358 	if ((hal_srng_src_num_avail(scn->hal_soc,
359 					dest_ring->srng_ctx, false) > 0)) {
360 		dest_desc = hal_srng_src_get_next(scn->hal_soc,
361 						  dest_ring->srng_ctx);
362 
363 		if (!dest_desc) {
364 			status = QDF_STATUS_E_FAILURE;
365 		} else {
366 
367 			CE_ADDR_COPY(dest_desc, dma_addr);
368 
369 			dest_ring->per_transfer_context[write_index] =
370 				per_recv_context;
371 
372 			/* Update Destination Ring Write Index */
373 			write_index = CE_RING_IDX_INCR(nentries_mask,
374 								write_index);
375 			status = QDF_STATUS_SUCCESS;
376 		}
377 	} else {
378 		dest_desc = NULL;
379 		status = QDF_STATUS_E_FAILURE;
380 	}
381 
382 	dest_ring->write_index = write_index;
383 	hal_srng_access_end(scn->hal_soc, dest_ring->srng_ctx);
384 	hif_record_ce_srng_desc_event(scn, CE_state->id,
385 				      HIF_CE_DEST_RING_BUFFER_POST,
386 				      (union ce_srng_desc *)dest_desc,
387 				      per_recv_context,
388 				      dest_ring->write_index, 0,
389 				      dest_ring->srng_ctx);
390 
391 	Q_TARGET_ACCESS_END(scn);
392 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
393 	return status;
394 }
395 
396 /*
397  * Guts of ce_recv_entries_done.
398  * The caller takes responsibility for any necessary locking.
399  */
400 static unsigned int
401 ce_recv_entries_done_nolock_srng(struct hif_softc *scn,
402 			    struct CE_state *CE_state)
403 {
404 	struct CE_ring_state *status_ring = CE_state->status_ring;
405 
406 	return hal_srng_dst_num_valid(scn->hal_soc,
407 				status_ring->srng_ctx, false);
408 }
409 
410 /*
411  * Guts of ce_send_entries_done.
412  * The caller takes responsibility for any necessary locking.
413  */
414 static unsigned int
415 ce_send_entries_done_nolock_srng(struct hif_softc *scn,
416 					struct CE_state *CE_state)
417 {
418 
419 	struct CE_ring_state *src_ring = CE_state->src_ring;
420 	int count = 0;
421 
422 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx))
423 		return 0;
424 
425 	count = hal_srng_src_done_val(scn->hal_soc, src_ring->srng_ctx);
426 
427 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
428 
429 	return count;
430 }
431 
432 /*
433  * Guts of ce_completed_recv_next.
434  * The caller takes responsibility for any necessary locking.
435  */
436 static QDF_STATUS
437 ce_completed_recv_next_nolock_srng(struct CE_state *CE_state,
438 			      void **per_CE_contextp,
439 			      void **per_transfer_contextp,
440 			      qdf_dma_addr_t *bufferp,
441 			      unsigned int *nbytesp,
442 			      unsigned int *transfer_idp,
443 			      unsigned int *flagsp)
444 {
445 	QDF_STATUS status;
446 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
447 	struct CE_ring_state *status_ring = CE_state->status_ring;
448 	unsigned int nentries_mask = dest_ring->nentries_mask;
449 	unsigned int sw_index = dest_ring->sw_index;
450 	struct hif_softc *scn = CE_state->scn;
451 	struct ce_srng_dest_status_desc *dest_status = NULL;
452 	int nbytes;
453 	struct ce_srng_dest_status_desc dest_status_info;
454 
455 	/* HP/TP update if any should happen only once per interrupt,
456 	 * therefore checking for CE receive_count.
457 	 */
458 	hal_srng_check_and_update_hptp(scn->hal_soc, status_ring->srng_ctx,
459 				       !CE_state->receive_count);
460 
461 	if (hal_srng_access_start(scn->hal_soc, status_ring->srng_ctx))
462 		return QDF_STATUS_E_FAILURE;
463 
464 	dest_status = hal_srng_dst_peek(scn->hal_soc, status_ring->srng_ctx);
465 	if (!dest_status) {
466 		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
467 		return QDF_STATUS_E_FAILURE;
468 	}
469 
470 	/*
471 	 * By copying the dest_desc_info element to local memory, we avoid
472 	 * extra memory reads from non-cacheable memory.
473 	 */
474 	dest_status_info = *dest_status;
475 	nbytes = dest_status_info.nbytes;
476 	if (nbytes == 0) {
477 		uint32_t hp, tp;
478 
479 		/*
480 		 * This closes a relatively unusual race where the Host
481 		 * sees the updated DRRI before the update to the
482 		 * corresponding descriptor has completed. We treat this
483 		 * as a descriptor that is not yet done.
484 		 */
485 		hal_get_sw_hptp(scn->hal_soc, status_ring->srng_ctx,
486 				&tp, &hp);
487 		hif_info_rl("No data to reap, hp %d tp %d", hp, tp);
488 		status = QDF_STATUS_E_FAILURE;
489 		hal_srng_access_end_reap(scn->hal_soc, status_ring->srng_ctx);
490 		goto done;
491 	}
492 
493 	/*
494 	 * Move the tail pointer since nbytes is non-zero and
495 	 * this entry is processed.
496 	 */
497 	hal_srng_dst_get_next(scn->hal_soc, status_ring->srng_ctx);
498 
499 	dest_status->nbytes = 0;
500 
501 	*nbytesp = nbytes;
502 	*transfer_idp = dest_status_info.meta_data;
503 	*flagsp = (dest_status_info.byte_swap) ? CE_RECV_FLAG_SWAPPED : 0;
504 
505 	if (per_CE_contextp)
506 		*per_CE_contextp = CE_state->recv_context;
507 
508 	/* NOTE: sw_index is more like a read_index in this context. It has a
509 	 * one-to-one mapping with status ring.
510 	 * Get the per transfer context from dest_ring.
511 	 */
512 	if (per_transfer_contextp)
513 		*per_transfer_contextp =
514 			dest_ring->per_transfer_context[sw_index];
515 
516 	dest_ring->per_transfer_context[sw_index] = 0;  /* sanity */
517 
518 	/* Update sw_index */
519 	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
520 	dest_ring->sw_index = sw_index;
521 	status = QDF_STATUS_SUCCESS;
522 
523 	hal_srng_access_end(scn->hal_soc, status_ring->srng_ctx);
524 	hif_record_ce_srng_desc_event(scn, CE_state->id,
525 				      HIF_CE_DEST_RING_BUFFER_REAP,
526 				      NULL,
527 				      dest_ring->
528 				      per_transfer_context[sw_index],
529 				      dest_ring->sw_index, nbytes,
530 				      dest_ring->srng_ctx);
531 
532 done:
533 	hif_record_ce_srng_desc_event(scn, CE_state->id,
534 				      HIF_CE_DEST_STATUS_RING_REAP,
535 				      (union ce_srng_desc *)dest_status,
536 				      NULL,
537 				      -1, 0,
538 				      status_ring->srng_ctx);
539 
540 	return status;
541 }
542 
543 static QDF_STATUS
544 ce_revoke_recv_next_srng(struct CE_handle *copyeng,
545 		    void **per_CE_contextp,
546 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
547 {
548 	struct CE_state *CE_state = (struct CE_state *)copyeng;
549 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
550 	unsigned int sw_index;
551 
552 	if (!dest_ring)
553 		return QDF_STATUS_E_FAILURE;
554 
555 	sw_index = dest_ring->sw_index;
556 
557 	if (per_CE_contextp)
558 		*per_CE_contextp = CE_state->recv_context;
559 
560 	/* NOTE: sw_index is more like a read_index in this context. It has a
561 	 * one-to-one mapping with status ring.
562 	 * Get the per transfer context from dest_ring.
563 	 */
564 	if (per_transfer_contextp)
565 		*per_transfer_contextp =
566 			dest_ring->per_transfer_context[sw_index];
567 
568 	if (!dest_ring->per_transfer_context[sw_index])
569 		return QDF_STATUS_E_FAILURE;
570 
571 	/* provide end condition */
572 	dest_ring->per_transfer_context[sw_index] = NULL;
573 
574 	/* Update sw_index */
575 	sw_index = CE_RING_IDX_INCR(dest_ring->nentries_mask, sw_index);
576 	dest_ring->sw_index = sw_index;
577 	return QDF_STATUS_SUCCESS;
578 }
579 
580 /*
581  * Guts of ce_completed_send_next.
582  * The caller takes responsibility for any necessary locking.
583  */
584 static QDF_STATUS
585 ce_completed_send_next_nolock_srng(struct CE_state *CE_state,
586 			      void **per_CE_contextp,
587 			      void **per_transfer_contextp,
588 			      qdf_dma_addr_t *bufferp,
589 			      unsigned int *nbytesp,
590 			      unsigned int *transfer_idp,
591 			      unsigned int *sw_idx,
592 			      unsigned int *hw_idx,
593 			      uint32_t *toeplitz_hash_result)
594 {
595 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
596 	struct CE_ring_state *src_ring = CE_state->src_ring;
597 	unsigned int nentries_mask = src_ring->nentries_mask;
598 	unsigned int sw_index = src_ring->sw_index;
599 	unsigned int swi = src_ring->sw_index;
600 	struct hif_softc *scn = CE_state->scn;
601 	struct ce_srng_src_desc *src_desc;
602 
603 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
604 		status = QDF_STATUS_E_FAILURE;
605 		return status;
606 	}
607 
608 	src_desc = hal_srng_src_reap_next(scn->hal_soc, src_ring->srng_ctx);
609 	if (src_desc) {
610 		hif_record_ce_srng_desc_event(scn, CE_state->id,
611 					      HIF_TX_DESC_COMPLETION,
612 					      (union ce_srng_desc *)src_desc,
613 					      src_ring->
614 					      per_transfer_context[swi],
615 					      swi, src_desc->nbytes,
616 					      src_ring->srng_ctx);
617 
618 		/* Return data from completed source descriptor */
619 		*bufferp = (qdf_dma_addr_t)
620 			(((uint64_t)(src_desc)->buffer_addr_lo +
621 			  ((uint64_t)((src_desc)->buffer_addr_hi &
622 				  0xFF) << 32)));
623 		*nbytesp = src_desc->nbytes;
624 		*transfer_idp = src_desc->meta_data;
625 		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/
626 
627 		if (per_CE_contextp)
628 			*per_CE_contextp = CE_state->send_context;
629 
630 		/* sw_index is used more like read index */
631 		if (per_transfer_contextp)
632 			*per_transfer_contextp =
633 				src_ring->per_transfer_context[sw_index];
634 
635 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
636 
637 		/* Update sw_index */
638 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
639 		src_ring->sw_index = sw_index;
640 		status = QDF_STATUS_SUCCESS;
641 	}
642 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
643 
644 	return status;
645 }
646 
647 /* NB: Modelled after ce_completed_send_next */
648 static QDF_STATUS
649 ce_cancel_send_next_srng(struct CE_handle *copyeng,
650 		void **per_CE_contextp,
651 		void **per_transfer_contextp,
652 		qdf_dma_addr_t *bufferp,
653 		unsigned int *nbytesp,
654 		unsigned int *transfer_idp,
655 		uint32_t *toeplitz_hash_result)
656 {
657 	struct CE_state *CE_state;
658 	QDF_STATUS status = QDF_STATUS_E_FAILURE;
659 	struct CE_ring_state *src_ring;
660 	unsigned int nentries_mask;
661 	unsigned int sw_index;
662 	struct hif_softc *scn;
663 	struct ce_srng_src_desc *src_desc;
664 
665 	CE_state = (struct CE_state *)copyeng;
666 	src_ring = CE_state->src_ring;
667 	if (!src_ring)
668 		return QDF_STATUS_E_FAILURE;
669 
670 	nentries_mask = src_ring->nentries_mask;
671 	sw_index = src_ring->sw_index;
672 	scn = CE_state->scn;
673 
674 	if (hal_srng_access_start(scn->hal_soc, src_ring->srng_ctx)) {
675 		status = QDF_STATUS_E_FAILURE;
676 		return status;
677 	}
678 
679 	src_desc = hal_srng_src_pending_reap_next(scn->hal_soc,
680 			src_ring->srng_ctx);
681 	if (src_desc) {
682 		/* Return data from completed source descriptor */
683 		*bufferp = (qdf_dma_addr_t)
684 			(((uint64_t)(src_desc)->buffer_addr_lo +
685 			  ((uint64_t)((src_desc)->buffer_addr_hi &
686 				  0xFF) << 32)));
687 		*nbytesp = src_desc->nbytes;
688 		*transfer_idp = src_desc->meta_data;
689 		*toeplitz_hash_result = 0; /*src_desc->toeplitz_hash_result;*/
690 
691 		if (per_CE_contextp)
692 			*per_CE_contextp = CE_state->send_context;
693 
694 		/* sw_index is used more like read index */
695 		if (per_transfer_contextp)
696 			*per_transfer_contextp =
697 				src_ring->per_transfer_context[sw_index];
698 
699 		src_ring->per_transfer_context[sw_index] = 0;   /* sanity */
700 
701 		/* Update sw_index */
702 		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
703 		src_ring->sw_index = sw_index;
704 		status = QDF_STATUS_SUCCESS;
705 	}
706 	hal_srng_access_end_reap(scn->hal_soc, src_ring->srng_ctx);
707 
708 	return status;
709 }
710 
711 /*
712  * Adjust interrupts for the copy complete handler.
713  * If it's needed for either send or recv, then unmask
714  * this interrupt; otherwise, mask it.
715  *
716  * Called with target_lock held.
717  */
718 static void
719 ce_per_engine_handler_adjust_srng(struct CE_state *CE_state,
720 			     int disable_copy_compl_intr)
721 {
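	/*
	 * Intentionally a no-op for SRNG copy engines: interrupt batching and
	 * timer thresholds are configured once at ring setup time (see the
	 * ce_srng_*_ring_setup() helpers below) rather than adjusted at
	 * runtime as on legacy copy engines.
	 */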
722 }
723 
724 static bool ce_check_int_watermark_srng(struct CE_state *CE_state,
725 					unsigned int *flags)
726 {
727 	/*TODO*/
728 	return false;
729 }
730 
731 static uint32_t ce_get_desc_size_srng(uint8_t ring_type)
732 {
733 	switch (ring_type) {
734 	case CE_RING_SRC:
735 		return sizeof(struct ce_srng_src_desc);
736 	case CE_RING_DEST:
737 		return sizeof(struct ce_srng_dest_desc);
738 	case CE_RING_STATUS:
739 		return sizeof(struct ce_srng_dest_status_desc);
740 	default:
741 		return 0;
742 	}
743 	return 0;
744 }
745 
746 static void ce_srng_msi_ring_params_setup(struct hif_softc *scn, uint32_t ce_id,
747 			      struct hal_srng_params *ring_params)
748 {
749 	uint32_t addr_low;
750 	uint32_t addr_high;
751 	uint32_t msi_data_start;
752 	uint32_t msi_data_count;
753 	uint32_t msi_irq_start;
754 	int ret;
755 	int irq_id;
756 
757 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
758 					  &msi_data_count, &msi_data_start,
759 					  &msi_irq_start);
760 
761 	/* msi config not found */
762 	if (ret)
763 		return;
764 
765 	irq_id = scn->int_assignment->msi_idx[ce_id];
766 	pld_get_msi_address(scn->qdf_dev->dev, &addr_low, &addr_high);
767 
768 	ring_params->msi_addr = addr_low;
769 	ring_params->msi_addr |= (qdf_dma_addr_t)(((uint64_t)addr_high) << 32);
770 	ring_params->msi_data =  irq_id + msi_data_start;
771 	ring_params->flags |= HAL_SRNG_MSI_INTR;
772 
773 	hif_debug("ce_id %d irq_id %d, msi_addr %pK, msi_data %d", ce_id,
774 		  irq_id, (void *)ring_params->msi_addr, ring_params->msi_data);
775 }
776 
777 static void ce_srng_src_ring_setup(struct hif_softc *scn, uint32_t ce_id,
778 				   struct CE_ring_state *src_ring,
779 				   struct CE_attr *attr)
780 {
781 	struct hal_srng_params ring_params = {0};
782 
783 	hif_debug("%s: ce_id %d", __func__, ce_id);
784 
785 	ring_params.ring_base_paddr = src_ring->base_addr_CE_space;
786 	ring_params.ring_base_vaddr = src_ring->base_addr_owner_space;
787 	ring_params.num_entries = src_ring->nentries;
788 	/*
789 	 * The minimum increment for the timer is 8us
790 	 * A default value of 0 disables the timer
791 	 * A valid default value caused continuous interrupts to
792 	 * fire with MSI enabled. Need to revisit usage of the timer
793 	 */
794 
795 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
796 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
797 
798 		ring_params.intr_timer_thres_us = 0;
799 		ring_params.intr_batch_cntr_thres_entries = 1;
800 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
801 	}
802 
803 	src_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_SRC, ce_id, 0,
804 					    &ring_params, 0);
805 }
806 
807 #ifdef WLAN_WAR_CE_DISABLE_SRNG_TIMER_IRQ
808 static void
809 ce_srng_initialize_dest_ring_thresh(struct CE_ring_state *dest_ring,
810 				    struct hal_srng_params *ring_params)
811 {
812 	ring_params->low_threshold = dest_ring->nentries >> 3;
813 	ring_params->intr_timer_thres_us = 0;
814 	ring_params->intr_batch_cntr_thres_entries = 1;
815 	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
816 }
817 #else
818 static void
819 ce_srng_initialize_dest_ring_thresh(struct CE_ring_state *dest_ring,
820 				    struct hal_srng_params *ring_params)
821 {
822 	ring_params->low_threshold = dest_ring->nentries >> 3;
823 	ring_params->intr_timer_thres_us = 100000;
824 	ring_params->intr_batch_cntr_thres_entries = 0;
825 	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
826 }
827 #endif
828 
829 #ifdef WLAN_DISABLE_STATUS_RING_TIMER_WAR
830 static inline bool ce_is_status_ring_timer_thresh_war_needed(void)
831 {
832 	return false;
833 }
834 #else
835 static inline bool ce_is_status_ring_timer_thresh_war_needed(void)
836 {
837 	return true;
838 }
839 #endif
840 
841 /**
842  * ce_srng_initialize_dest_timer_interrupt_war() - war initialization
843  * @dest_ring: ring being initialized
844  * @ring_params: pointer to initialized parameters
845  *
846  * For Napier & Hawkeye v1, the status ring timer interrupts do not work
847  * As a workaround, the host configures the destination rings to be a proxy for
848  * work needing to be done.
849  *
850  * The interrupts are set up such that if the destination ring is less than fully
851  * posted, there is likely undone work for the status ring that the host should
852  * process.
853  *
854  * There is a timing bug in srng based copy engines such that a fully posted
855  * srng based copy engine has 2 empty entries instead of just one.  The copy
856  * engine data structures work with 1 empty entry, but the software frequently
857  * fails to post the last entry due to the race condition.
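 *
 * As an illustrative example: with a 512-entry destination ring,
 * "fully posted" means 512 - 2 = 510 buffers, so the low threshold is set
 * to 509 and the low-threshold interrupt fires as soon as the host falls
 * behind on reposting receive buffers.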
858  */
859 static void ce_srng_initialize_dest_timer_interrupt_war(
860 					struct CE_ring_state *dest_ring,
861 					struct hal_srng_params *ring_params)
862 {
863 	int num_buffers_when_fully_posted = dest_ring->nentries - 2;
864 
865 	ring_params->low_threshold = num_buffers_when_fully_posted - 1;
866 	ring_params->intr_timer_thres_us = 1024;
867 	ring_params->intr_batch_cntr_thres_entries = 0;
868 	ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
869 }
870 
871 static void ce_srng_dest_ring_setup(struct hif_softc *scn,
872 				    uint32_t ce_id,
873 				    struct CE_ring_state *dest_ring,
874 				    struct CE_attr *attr)
875 {
876 	struct hal_srng_params ring_params = {0};
877 
878 	hif_debug("ce_id: %d", ce_id);
879 
880 	ring_params.ring_base_paddr = dest_ring->base_addr_CE_space;
881 	ring_params.ring_base_vaddr = dest_ring->base_addr_owner_space;
882 	ring_params.num_entries = dest_ring->nentries;
883 	ring_params.max_buffer_length = attr->src_sz_max;
884 
885 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
886 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
887 		if (ce_is_status_ring_timer_thresh_war_needed()) {
888 			ce_srng_initialize_dest_timer_interrupt_war(
889 					dest_ring, &ring_params);
890 		} else {
891 			/* normal behavior for future chips */
892 			ce_srng_initialize_dest_ring_thresh(dest_ring,
893 							    &ring_params);
894 		}
895 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
896 	}
897 
898 	/* Dest ring is also a source ring (the host is the producer on it) */
899 	dest_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST, ce_id, 0,
900 					     &ring_params, 0);
901 }
902 
903 #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
904 /**
905  * ce_status_ring_config_int_threshold() - configure ce status ring interrupt
906  *                                         thresholds
907  * @scn: hif handle
908  * @ring_params: ce srng params
909  *
910  * Return: None
911  */
912 static inline
913 void ce_status_ring_config_int_threshold(struct hif_softc *scn,
914 					 struct hal_srng_params *ring_params)
915 {
916 	ring_params->intr_timer_thres_us =
917 			scn->ini_cfg.ce_status_ring_timer_threshold;
918 	ring_params->intr_batch_cntr_thres_entries =
919 			scn->ini_cfg.ce_status_ring_batch_count_threshold;
920 }
921 #else
922 static inline
923 void ce_status_ring_config_int_threshold(struct hif_softc *scn,
924 					 struct hal_srng_params *ring_params)
925 {
926 	ring_params->intr_timer_thres_us = 0x1000;
927 	ring_params->intr_batch_cntr_thres_entries = 0x1;
928 }
929 #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
930 
931 static void ce_srng_status_ring_setup(struct hif_softc *scn, uint32_t ce_id,
932 				struct CE_ring_state *status_ring,
933 				struct CE_attr *attr)
934 {
935 	struct hal_srng_params ring_params = {0};
936 
937 	hif_debug("ce_id: %d", ce_id);
938 
939 	ring_params.ring_base_paddr = status_ring->base_addr_CE_space;
940 	ring_params.ring_base_vaddr = status_ring->base_addr_owner_space;
941 	ring_params.num_entries = status_ring->nentries;
942 
943 	if (!(CE_ATTR_DISABLE_INTR & attr->flags)) {
944 		ce_srng_msi_ring_params_setup(scn, ce_id, &ring_params);
945 		ce_status_ring_config_int_threshold(scn, &ring_params);
946 	}
947 
948 	status_ring->srng_ctx = hal_srng_setup(scn->hal_soc, CE_DST_STATUS,
949 					       ce_id, 0, &ring_params, 0);
950 }
951 
952 static int ce_ring_setup_srng(struct hif_softc *scn, uint8_t ring_type,
953 		uint32_t ce_id, struct CE_ring_state *ring,
954 		struct CE_attr *attr)
955 {
956 	switch (ring_type) {
957 	case CE_RING_SRC:
958 		ce_srng_src_ring_setup(scn, ce_id, ring, attr);
959 		break;
960 	case CE_RING_DEST:
961 		ce_srng_dest_ring_setup(scn, ce_id, ring, attr);
962 		break;
963 	case CE_RING_STATUS:
964 		ce_srng_status_ring_setup(scn, ce_id, ring, attr);
965 		break;
966 	default:
967 		qdf_assert(0);
968 		break;
969 	}
970 
971 	return 0;
972 }
973 
974 static void ce_ring_cleanup_srng(struct hif_softc *scn,
975 				 struct CE_state *CE_state,
976 				 uint8_t ring_type)
977 {
978 	hal_ring_handle_t hal_srng = NULL;
979 
980 	switch (ring_type) {
981 	case CE_RING_SRC:
982 		hal_srng = (hal_ring_handle_t)CE_state->src_ring->srng_ctx;
983 	break;
984 	case CE_RING_DEST:
985 		hal_srng = (hal_ring_handle_t)CE_state->dest_ring->srng_ctx;
986 	break;
987 	case CE_RING_STATUS:
988 		hal_srng = (hal_ring_handle_t)CE_state->status_ring->srng_ctx;
989 	break;
990 	}
991 
992 	if (hal_srng)
993 		hal_srng_cleanup(scn->hal_soc, hal_srng, 0);
994 }
995 
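/*
 * Register one shadow-register entry for every configured CE ring (source,
 * destination and destination-status) so the host can update ring indices
 * through the shadow address space rather than touching the target
 * registers directly.
 */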
996 static void ce_construct_shadow_config_srng(struct hif_softc *scn)
997 {
998 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
999 	int ce_id;
1000 
1001 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
1002 		if (hif_state->host_ce_config[ce_id].src_nentries)
1003 			hal_set_one_shadow_config(scn->hal_soc,
1004 						  CE_SRC, ce_id);
1005 
1006 		if (hif_state->host_ce_config[ce_id].dest_nentries) {
1007 			hal_set_one_shadow_config(scn->hal_soc,
1008 						  CE_DST, ce_id);
1009 
1010 			hal_set_one_shadow_config(scn->hal_soc,
1011 						  CE_DST_STATUS, ce_id);
1012 		}
1013 	}
1014 }
1015 
1016 static void ce_prepare_shadow_register_v2_cfg_srng(struct hif_softc *scn,
1017 		struct pld_shadow_reg_v2_cfg **shadow_config,
1018 		int *num_shadow_registers_configured)
1019 {
1020 	if (!scn->hal_soc) {
1021 		hif_err("hal not initialized: not initializing shadow config");
1022 		return;
1023 	}
1024 
1025 	hal_get_shadow_config(scn->hal_soc, shadow_config,
1026 			      num_shadow_registers_configured);
1027 
1028 	if (*num_shadow_registers_configured != 0) {
1029 		hif_err("hal shadow register configuration already constructed");
1030 
1031 		/* return with original configuration*/
1032 		return;
1033 	}
1034 	hal_construct_srng_shadow_regs(scn->hal_soc);
1035 	ce_construct_shadow_config_srng(scn);
1036 	hal_set_shadow_regs(scn->hal_soc);
1037 	hal_construct_shadow_regs(scn->hal_soc);
1038 	/* get updated configuration */
1039 	hal_get_shadow_config(scn->hal_soc, shadow_config,
1040 			      num_shadow_registers_configured);
1041 }
1042 
1043 #ifdef CONFIG_SHADOW_V3
1044 static void ce_prepare_shadow_register_v3_cfg_srng(struct hif_softc *scn,
1045 		struct pld_shadow_reg_v3_cfg **shadow_config,
1046 		int *num_shadow_registers_configured)
1047 {
1048 	if (!scn->hal_soc) {
1049 		hif_err("hal not initialized: not initializing shadow config");
1050 		return;
1051 	}
1052 
1053 	hal_get_shadow_v3_config(scn->hal_soc, shadow_config,
1054 				 num_shadow_registers_configured);
1055 
1056 	if (*num_shadow_registers_configured != 0) {
1057 		hif_err("hal shadow register configuration already constructed");
1058 
1059 		/* return with original configuration*/
1060 		return;
1061 	}
1062 	hal_construct_srng_shadow_regs(scn->hal_soc);
1063 	ce_construct_shadow_config_srng(scn);
1064 	hal_set_shadow_regs(scn->hal_soc);
1065 	hal_construct_shadow_regs(scn->hal_soc);
1066 	/* get updated configuration */
1067 	hal_get_shadow_v3_config(scn->hal_soc, shadow_config,
1068 				 num_shadow_registers_configured);
1069 }
1070 #endif
1071 
1072 #ifdef HIF_CE_LOG_INFO
1073 /**
1074  * ce_get_index_info_srng(): Get CE index info
1075  * @scn: HIF Context
1076  * @ce_state: CE opaque handle
1077  * @info: CE info
1078  *
1079  * Return: 0 for success and non zero for failure
1080  */
1081 static
1082 int ce_get_index_info_srng(struct hif_softc *scn, void *ce_state,
1083 			   struct ce_index *info)
1084 {
1085 	struct CE_state *CE_state = (struct CE_state *)ce_state;
1086 	uint32_t tp, hp;
1087 
1088 	info->id = CE_state->id;
1089 	if (CE_state->src_ring) {
1090 		hal_get_sw_hptp(scn->hal_soc, CE_state->src_ring->srng_ctx,
1091 				&tp, &hp);
1092 		info->u.srng_info.tp = tp;
1093 		info->u.srng_info.hp = hp;
1094 	} else if (CE_state->dest_ring && CE_state->status_ring) {
1095 		hal_get_sw_hptp(scn->hal_soc, CE_state->status_ring->srng_ctx,
1096 				&tp, &hp);
1097 		info->u.srng_info.status_tp = tp;
1098 		info->u.srng_info.status_hp = hp;
1099 		hal_get_sw_hptp(scn->hal_soc, CE_state->dest_ring->srng_ctx,
1100 				&tp, &hp);
1101 		info->u.srng_info.tp = tp;
1102 		info->u.srng_info.hp = hp;
1103 	}
1104 
1105 	return 0;
1106 }
1107 #endif
1108 
1109 #ifdef FEATURE_DIRECT_LINK
1110 /**
1111  * ce_set_srng_msi_irq_config_by_ceid(): Set srng MSI irq configuration for CE
1112  *  given by id
1113  * @scn: HIF Context
1114  * @ce_id: copy engine id
1115  * @addr: MSI address
1116  * @data: MSI data
1117  *
1118  * Return: QDF_STATUS_SUCCESS on success, error QDF_STATUS on failure
1119  */
1120 static QDF_STATUS
1121 ce_set_srng_msi_irq_config_by_ceid(struct hif_softc *scn, uint8_t ce_id,
1122 				   uint64_t addr, uint32_t data)
1123 {
1124 	struct CE_state *ce_state;
1125 	hal_ring_handle_t ring_hdl;
1126 	struct hal_srng_params ring_params = {0};
1127 
1128 	ce_state = scn->ce_id_to_state[ce_id];
1129 	if (!ce_state)
1130 		return QDF_STATUS_E_NOSUPPORT;
1131 
1132 	ring_params.msi_addr = addr;
1133 	ring_params.msi_data = data;
1134 
1135 	if (ce_state->src_ring) {
1136 		ring_hdl = ce_state->src_ring->srng_ctx;
1137 
1138 		ring_params.intr_timer_thres_us = 0;
1139 		ring_params.intr_batch_cntr_thres_entries = 1;
1140 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
1141 	} else if (ce_state->dest_ring) {
1142 		ring_hdl = ce_state->status_ring->srng_ctx;
1143 
1144 		ce_status_ring_config_int_threshold(scn, &ring_params);
1145 
1146 		hal_srng_set_msi_irq_config(scn->hal_soc, ring_hdl,
1147 					    &ring_params);
1148 
1149 		if (ce_is_status_ring_timer_thresh_war_needed()) {
1150 			ce_srng_initialize_dest_timer_interrupt_war(
1151 					ce_state->dest_ring, &ring_params);
1152 		} else {
1153 			ce_srng_initialize_dest_ring_thresh(ce_state->dest_ring,
1154 							    &ring_params);
1155 		}
1156 		ring_params.prefetch_timer = HAL_SRNG_PREFETCH_TIMER;
1157 		ring_hdl = ce_state->dest_ring->srng_ctx;
1158 	} else {
1159 		return QDF_STATUS_E_FAILURE;
1160 	}
1161 
1162 	hal_srng_set_msi_irq_config(scn->hal_soc, ring_hdl, &ring_params);
1163 
1164 	return QDF_STATUS_SUCCESS;
1165 }
1166 
1167 static
1168 uint16_t ce_get_direct_link_dest_srng_buffers(struct hif_softc *scn,
1169 					      uint64_t **dma_addr,
1170 					      uint32_t *buf_size)
1171 {
1172 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1173 	struct CE_state *ce_state;
1174 	struct service_to_pipe *tgt_svc_cfg;
1175 	uint64_t *dma_addr_arr = NULL;
1176 	uint32_t i;
1177 	uint32_t j = 0;
1178 
1179 	tgt_svc_cfg = hif_state->tgt_svc_map;
1180 
1181 	for (i = 0; i < hif_state->sz_tgt_svc_map; i++) {
1182 		if (tgt_svc_cfg[i].service_id != LPASS_DATA_MSG_SVC ||
1183 		    tgt_svc_cfg[i].pipedir != PIPEDIR_IN)
1184 			continue;
1185 
1186 		ce_state = scn->ce_id_to_state[tgt_svc_cfg[i].pipenum];
1187 		if (!ce_state || !ce_state->dest_ring) {
1188 			hif_err("Direct Link CE pipe %d not initialized",
1189 				tgt_svc_cfg[i].pipenum);
1190 			return 0;	/* buffer count, not a QDF_STATUS */
1191 		}
1192 
1193 		QDF_ASSERT(scn->dl_recv_pages.dma_pages);
1194 
1195 		dma_addr_arr = qdf_mem_malloc(sizeof(*dma_addr_arr) *
1196 					      scn->dl_recv_pages.num_pages);
1197 		if (!dma_addr_arr)
1198 			return 0;
1199 
1200 		for (j = 0; j < scn->dl_recv_pages.num_pages; j++)
1201 			dma_addr_arr[j] =
1202 				scn->dl_recv_pages.dma_pages[j].page_p_addr;
1203 
1204 		*buf_size = ce_state->src_sz_max;
1205 
1206 		break;
1207 	}
1208 
1209 	*dma_addr = dma_addr_arr;
1210 
1211 	return j;
1212 }
1213 
1214 /**
1215  * ce_save_srng_info() - Get and save srng information
1216  * @hif_ctx: hif context
1217  * @srng_info: Direct Link CE srng information
1218  * @srng_ctx: Direct Link CE srng context
1219  *
1220  * Return: QDF status
1221  */
1222 static void
1223 ce_save_srng_info(struct hif_softc *hif_ctx, struct hif_ce_ring_info *srng_info,
1224 		  void *srng_ctx)
1225 {
1226 	struct hal_srng_params params;
1227 
1228 	hal_get_srng_params(hif_ctx->hal_soc, srng_ctx, &params);
1229 
1230 	srng_info->ring_id = params.ring_id;
1231 	srng_info->ring_dir = params.ring_dir;
1232 	srng_info->num_entries = params.num_entries;
1233 	srng_info->entry_size = params.entry_size;
1234 	srng_info->ring_base_paddr = params.ring_base_paddr;
1235 	srng_info->hp_paddr =
1236 		      hal_srng_get_hp_addr(hif_ctx->hal_soc, srng_ctx);
1237 	srng_info->tp_paddr =
1238 		      hal_srng_get_tp_addr(hif_ctx->hal_soc, srng_ctx);
1239 }
1240 
1241 static
1242 QDF_STATUS ce_get_direct_link_srng_info(struct hif_softc *scn,
1243 					struct hif_direct_link_ce_info *info,
1244 					uint8_t max_ce_info_len)
1245 {
1246 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1247 	struct CE_state *ce_state;
1248 	struct service_to_pipe *tgt_svc_cfg;
1249 	uint8_t ce_info_idx = 0;
1250 	uint32_t i;
1251 
1252 	tgt_svc_cfg = hif_state->tgt_svc_map;
1253 
1254 	for (i = 0; i < hif_state->sz_tgt_svc_map; i++) {
1255 		if (tgt_svc_cfg[i].service_id != LPASS_DATA_MSG_SVC)
1256 			continue;
1257 
1258 		ce_state = scn->ce_id_to_state[tgt_svc_cfg[i].pipenum];
1259 		if (!ce_state) {
1260 			hif_err("Direct Link CE pipe %d not initialized",
1261 				tgt_svc_cfg[i].pipenum);
1262 			return QDF_STATUS_E_FAILURE;
1263 		}
1264 
1265 		if (ce_info_idx > max_ce_info_len)
1266 			return QDF_STATUS_E_FAILURE;
1267 
1268 		info[ce_info_idx].ce_id = ce_state->id;
1269 		info[ce_info_idx].pipe_dir = tgt_svc_cfg[i].pipedir;
1270 
1271 		if (ce_state->src_ring)
1272 			ce_save_srng_info(scn, &info[ce_info_idx].ring_info,
1273 					  ce_state->src_ring->srng_ctx);
1274 		else
1275 			ce_save_srng_info(scn, &info[ce_info_idx].ring_info,
1276 					  ce_state->dest_ring->srng_ctx);
1277 
1278 		ce_info_idx++;
1279 
1280 		if (!ce_state->status_ring)
1281 			continue;
1282 
1283 		if (ce_info_idx > max_ce_info_len)
1284 			return QDF_STATUS_E_FAILURE;
1285 
1286 		info[ce_info_idx].ce_id = ce_state->id;
1287 		info[ce_info_idx].pipe_dir = tgt_svc_cfg[i].pipedir;
1288 
1289 		ce_save_srng_info(scn, &info[ce_info_idx].ring_info,
1290 				  ce_state->status_ring->srng_ctx);
1291 		ce_info_idx++;
1292 	}
1293 
1294 	return QDF_STATUS_SUCCESS;
1295 }
1296 #endif
1297 
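/*
 * ce_ops vtable for SRNG-based copy engines; returned by ce_services_srng()
 * and registered with the CE layer by ce_service_srng_init() below.
 */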
1298 static struct ce_ops ce_service_srng = {
1299 	.ce_get_desc_size = ce_get_desc_size_srng,
1300 	.ce_ring_setup = ce_ring_setup_srng,
1301 	.ce_srng_cleanup = ce_ring_cleanup_srng,
1302 	.ce_sendlist_send = ce_sendlist_send_srng,
1303 	.ce_completed_recv_next_nolock = ce_completed_recv_next_nolock_srng,
1304 	.ce_revoke_recv_next = ce_revoke_recv_next_srng,
1305 	.ce_cancel_send_next = ce_cancel_send_next_srng,
1306 	.ce_recv_buf_enqueue = ce_recv_buf_enqueue_srng,
1307 	.ce_per_engine_handler_adjust = ce_per_engine_handler_adjust_srng,
1308 	.ce_send_nolock = ce_send_nolock_srng,
1309 	.watermark_int = ce_check_int_watermark_srng,
1310 	.ce_completed_send_next_nolock = ce_completed_send_next_nolock_srng,
1311 	.ce_recv_entries_done_nolock = ce_recv_entries_done_nolock_srng,
1312 	.ce_send_entries_done_nolock = ce_send_entries_done_nolock_srng,
1313 	.ce_prepare_shadow_register_v2_cfg =
1314 		ce_prepare_shadow_register_v2_cfg_srng,
1315 #ifdef CONFIG_SHADOW_V3
1316 	.ce_prepare_shadow_register_v3_cfg =
1317 		ce_prepare_shadow_register_v3_cfg_srng,
1318 #endif
1319 #ifdef HIF_CE_LOG_INFO
1320 	.ce_get_index_info =
1321 		ce_get_index_info_srng,
1322 #endif
1323 #ifdef FEATURE_DIRECT_LINK
1324 	.ce_set_irq_config_by_ceid = ce_set_srng_msi_irq_config_by_ceid,
1325 	.ce_get_direct_link_dest_buffers = ce_get_direct_link_dest_srng_buffers,
1326 	.ce_get_direct_link_ring_info = ce_get_direct_link_srng_info,
1327 #endif
1328 };
1329 
1330 struct ce_ops *ce_services_srng(void)
1331 {
1332 	return &ce_service_srng;
1333 }
1334 qdf_export_symbol(ce_services_srng);
1335 
1336 void ce_service_srng_init(void)
1337 {
1338 	ce_service_register_module(CE_SVC_SRNG, &ce_services_srng);
1339 }
1340