xref: /wlan-driver/qca-wifi-host-cmn/hif/src/ce/ce_service.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "hif.h"
21 #include "hif_io32.h"
22 #include "ce_api.h"
23 #include "ce_main.h"
24 #include "ce_internal.h"
25 #include "ce_reg.h"
26 #include "qdf_lock.h"
27 #include "regtable.h"
28 #include "hif_main.h"
29 #include "hif_debug.h"
30 #include "hif_napi.h"
31 #include "qdf_module.h"
32 #include <qdf_tracepoint.h>
33 
34 #ifdef IPA_OFFLOAD
35 #ifdef QCA_WIFI_3_0
36 #define CE_IPA_RING_INIT(ce_desc)                       \
37 	do {                                            \
38 		ce_desc->gather = 0;                    \
39 		ce_desc->enable_11h = 0;                \
40 		ce_desc->meta_data_low = 0;             \
41 		ce_desc->packet_result_offset = 64;     \
42 		ce_desc->toeplitz_hash_enable = 0;      \
43 		ce_desc->addr_y_search_disable = 0;     \
44 		ce_desc->addr_x_search_disable = 0;     \
45 		ce_desc->misc_int_disable = 0;          \
46 		ce_desc->target_int_disable = 0;        \
47 		ce_desc->host_int_disable = 0;          \
48 		ce_desc->dest_byte_swap = 0;            \
49 		ce_desc->byte_swap = 0;                 \
50 		ce_desc->type = 2;                      \
51 		ce_desc->tx_classify = 1;               \
52 		ce_desc->buffer_addr_hi = 0;            \
53 		ce_desc->meta_data = 0;                 \
54 		ce_desc->nbytes = 128;                  \
55 	} while (0)
56 #else
57 #define CE_IPA_RING_INIT(ce_desc)                       \
58 	do {                                            \
59 		ce_desc->byte_swap = 0;                 \
60 		ce_desc->nbytes = 60;                   \
61 		ce_desc->gather = 0;                    \
62 	} while (0)
63 #endif /* QCA_WIFI_3_0 */
64 #endif /* IPA_OFFLOAD */
65 
66 static int war1_allow_sleep;
67 /* io32 write workaround */
68 static int hif_ce_war1;
69 
70 /**
71  * hif_ce_war_disable() - disable ce war gobally
72  */
hif_ce_war_disable(void)73 void hif_ce_war_disable(void)
74 {
75 	hif_ce_war1 = 0;
76 }
77 
78 /**
79  * hif_ce_war_enable() - enable ce war gobally
80  */
hif_ce_war_enable(void)81 void hif_ce_war_enable(void)
82 {
83 	hif_ce_war1 = 1;
84 }
85 
86 /*
87  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
88  * for defined here
89  */
90 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
91 
92 #define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
93 #define CE_DEBUG_DATA_PER_ROW 16
94 
95 static const char *ce_event_type_to_str(enum hif_ce_event_type type);
96 
get_next_record_index(qdf_atomic_t * table_index,int array_size)97 int get_next_record_index(qdf_atomic_t *table_index, int array_size)
98 {
99 	int record_index = qdf_atomic_inc_return(table_index);
100 
101 	if (record_index == array_size)
102 		qdf_atomic_sub(array_size, table_index);
103 
104 	while (record_index >= array_size)
105 		record_index -= array_size;
106 
107 	return record_index;
108 }
109 
110 qdf_export_symbol(get_next_record_index);
111 
112 #ifdef HIF_CE_DEBUG_DATA_BUF
hif_ce_desc_data_record(struct hif_ce_desc_event * event,int len)113 void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
114 {
115 	uint8_t *data = NULL;
116 
117 	if (!event->data) {
118 		hif_err_rl("No ce debug memory allocated");
119 		return;
120 	}
121 
122 	if (event->memory && len > 0)
123 		data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
124 
125 	event->actual_data_len = 0;
126 	qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
127 
128 	if (data && len > 0) {
129 		qdf_mem_copy(event->data, data,
130 				((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
131 				 len : CE_DEBUG_MAX_DATA_BUF_SIZE));
132 		event->actual_data_len = len;
133 	}
134 }
135 
136 qdf_export_symbol(hif_ce_desc_data_record);
137 
hif_clear_ce_desc_debug_data(struct hif_ce_desc_event * event)138 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
139 {
140 	qdf_mem_zero(event,
141 		     offsetof(struct hif_ce_desc_event, data));
142 }
143 
144 qdf_export_symbol(hif_clear_ce_desc_debug_data);
145 #else
hif_clear_ce_desc_debug_data(struct hif_ce_desc_event * event)146 void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
147 {
148 	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
149 }
150 
151 qdf_export_symbol(hif_clear_ce_desc_debug_data);
152 #endif /* HIF_CE_DEBUG_DATA_BUF */
153 
154 #if defined(HIF_RECORD_PADDR)
hif_ce_desc_record_rx_paddr(struct hif_softc * scn,struct hif_ce_desc_event * event,qdf_nbuf_t memory)155 void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
156 				 struct hif_ce_desc_event *event,
157 				 qdf_nbuf_t memory)
158 {
159 	if (memory) {
160 		event->dma_addr = QDF_NBUF_CB_PADDR(memory);
161 		event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(
162 					scn->qdf_dev,
163 					event->dma_addr);
164 
165 		event->virt_to_phy =
166 			virt_to_phys(qdf_nbuf_data(memory));
167 	}
168 }
169 #endif /* HIF_RECORD_RX_PADDR */
170 
hif_display_latest_desc_hist(struct hif_opaque_softc * hif_ctx)171 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx)
172 {
173 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
174 	struct ce_desc_hist *ce_hist;
175 	struct latest_evt_history *evt;
176 	int i, j;
177 
178 	if (!scn)
179 		return;
180 
181 	ce_hist = &scn->hif_ce_desc_hist;
182 
183 	for (i = 0; i < HIF_CE_MAX_LATEST_HIST; i++) {
184 		if (!ce_hist->enable[i + HIF_CE_MAX_LATEST_HIST])
185 			continue;
186 
187 		for (j = 0; j < HIF_CE_MAX_LATEST_EVTS; j++) {
188 			evt = &ce_hist->latest_evts[i][j];
189 			hif_info_high("CE_id:%d event_idx:%d cpu_id:%d irq_entry:0x%llx tasklet_entry:0x%llx tasklet_resched:0x%llx tasklet_exit:0x%llx ce_work:0x%llx hp:%x tp:%x",
190 				      (i + HIF_CE_MAX_LATEST_HIST), j, evt->cpu_id,
191 				      evt->irq_entry_ts, evt->bh_entry_ts,
192 				      evt->bh_resched_ts, evt->bh_exit_ts,
193 				      evt->bh_work_ts, evt->ring_hp, evt->ring_tp);
194 		}
195 	}
196 }
197 
hif_record_latest_evt(struct ce_desc_hist * ce_hist,uint8_t type,int ce_id,uint64_t time,uint32_t hp,uint32_t tp)198 void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
199 			   uint8_t type,
200 			   int ce_id, uint64_t time,
201 			   uint32_t hp, uint32_t tp)
202 {
203 	struct latest_evt_history *latest_evts;
204 	int idx = 0;
205 
206 	if (ce_id != 2 && ce_id != 3)
207 		return;
208 
209 	latest_evts = &ce_hist->latest_evts[ce_id - HIF_CE_MAX_LATEST_HIST][idx];
210 
211 	switch (type) {
212 	case HIF_IRQ_EVENT:
213 		if (latest_evts[idx].irq_entry_ts >
214 		    latest_evts[idx + 1].irq_entry_ts)
215 			idx = 1;
216 		latest_evts[idx].irq_entry_ts = time;
217 		latest_evts[idx].cpu_id = qdf_get_cpu();
218 		break;
219 	case HIF_CE_TASKLET_ENTRY:
220 		if (latest_evts[idx].bh_entry_ts >
221 		    latest_evts[idx + 1].bh_entry_ts)
222 			idx = 1;
223 		latest_evts[idx].bh_entry_ts = time;
224 		break;
225 	case HIF_CE_TASKLET_RESCHEDULE:
226 		if (latest_evts[idx].bh_resched_ts >
227 		    latest_evts[idx + 1].bh_resched_ts)
228 			idx = 1;
229 		latest_evts[idx].bh_resched_ts = time;
230 		break;
231 	case HIF_CE_TASKLET_EXIT:
232 		if (latest_evts[idx].bh_exit_ts >
233 		    latest_evts[idx + 1].bh_exit_ts)
234 			idx = 1;
235 		latest_evts[idx].bh_exit_ts = time;
236 		break;
237 	case HIF_TX_DESC_COMPLETION:
238 	case HIF_CE_DEST_STATUS_RING_REAP:
239 		if (latest_evts[idx].bh_work_ts >
240 		    latest_evts[idx + 1].bh_work_ts)
241 			idx = 1;
242 		latest_evts[idx].bh_work_ts = time;
243 		latest_evts[idx].ring_hp = hp;
244 		latest_evts[idx].ring_tp = tp;
245 		break;
246 	default:
247 		break;
248 	}
249 }
250 
251 /**
252  * hif_record_ce_desc_event() - record ce descriptor events
253  * @scn: hif_softc
254  * @ce_id: which ce is the event occurring on
255  * @type: what happened
256  * @descriptor: pointer to the descriptor posted/completed
257  * @memory: virtual address of buffer related to the descriptor
258  * @index: index that the descriptor was/will be at.
259  * @len:
260  */
hif_record_ce_desc_event(struct hif_softc * scn,int ce_id,enum hif_ce_event_type type,union ce_desc * descriptor,void * memory,int index,int len)261 void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
262 				enum hif_ce_event_type type,
263 				union ce_desc *descriptor,
264 				void *memory, int index,
265 				int len)
266 {
267 	int record_index;
268 	struct hif_ce_desc_event *event;
269 
270 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
271 	struct hif_ce_desc_event *hist_ev = NULL;
272 
273 	if (ce_id < CE_COUNT_MAX)
274 		hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
275 	else
276 		return;
277 
278 	if (ce_id >= CE_COUNT_MAX)
279 		return;
280 
281 	if (!ce_hist->enable[ce_id])
282 		return;
283 
284 	if (!hist_ev)
285 		return;
286 
287 	record_index = get_next_record_index(
288 			&ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
289 
290 	event = &hist_ev[record_index];
291 
292 	hif_clear_ce_desc_debug_data(event);
293 
294 	event->type = type;
295 	event->time = qdf_get_log_timestamp();
296 	event->cpu_id = qdf_get_cpu();
297 
298 	if (descriptor)
299 		qdf_mem_copy(&event->descriptor, descriptor,
300 			     sizeof(union ce_desc));
301 
302 	event->memory = memory;
303 	event->index = index;
304 
305 	if (event->type == HIF_RX_DESC_POST ||
306 	    event->type == HIF_RX_DESC_COMPLETION)
307 		hif_ce_desc_record_rx_paddr(scn, event, memory);
308 
309 	if (ce_hist->data_enable[ce_id])
310 		hif_ce_desc_data_record(event, len);
311 
312 	hif_record_latest_evt(ce_hist, type, ce_id, event->time, 0, 0);
313 }
314 qdf_export_symbol(hif_record_ce_desc_event);
315 
316 /**
317  * ce_init_ce_desc_event_log() - initialize the ce event log
318  * @scn: HIF context
319  * @ce_id: copy engine id for which we are initializing the log
320  * @size: size of array to dedicate
321  *
322  * Currently the passed size is ignored in favor of a precompiled value.
323  */
ce_init_ce_desc_event_log(struct hif_softc * scn,int ce_id,int size)324 void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
325 {
326 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
327 	qdf_atomic_init(&ce_hist->history_index[ce_id]);
328 	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
329 }
330 
331 /**
332  * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
333  * @scn: HIF context
334  * @ce_id: copy engine id for which we are deinitializing the log
335  *
336  */
ce_deinit_ce_desc_event_log(struct hif_softc * scn,int ce_id)337 inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
338 {
339 	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
340 
341 	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
342 }
343 
344 #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
hif_record_ce_desc_event(struct hif_softc * scn,int ce_id,enum hif_ce_event_type type,union ce_desc * descriptor,void * memory,int index,int len)345 void hif_record_ce_desc_event(struct hif_softc *scn,
346 		int ce_id, enum hif_ce_event_type type,
347 		union ce_desc *descriptor, void *memory,
348 		int index, int len)
349 {
350 }
351 qdf_export_symbol(hif_record_ce_desc_event);
352 
ce_init_ce_desc_event_log(struct hif_softc * scn,int ce_id,int size)353 inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
354 					int size)
355 {
356 }
357 
ce_deinit_ce_desc_event_log(struct hif_softc * scn,int ce_id)358 void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
359 {
360 }
361 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
362 
363 #ifdef NAPI_YIELD_BUDGET_BASED
hif_ce_service_should_yield(struct hif_softc * scn,struct CE_state * ce_state)364 bool hif_ce_service_should_yield(struct hif_softc *scn,
365 				 struct CE_state *ce_state)
366 {
367 	bool yield =  hif_max_num_receives_reached(scn, ce_state->receive_count);
368 
369 	/* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
370 	 * beyond MAX_NUM_OF_RECEIVES for NAPI backet calculation issue. This
371 	 * can happen in fast path handling as processing is happening in
372 	 * batches.
373 	 */
374 	if (yield)
375 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
376 
377 	return yield;
378 }
379 #else
380 /**
381  * hif_ce_service_should_yield() - return true if the service is hogging the cpu
382  * @scn: hif context
383  * @ce_state: context of the copy engine being serviced
384  *
385  * Return: true if the service should yield
386  */
hif_ce_service_should_yield(struct hif_softc * scn,struct CE_state * ce_state)387 bool hif_ce_service_should_yield(struct hif_softc *scn,
388 				 struct CE_state *ce_state)
389 {
390 	bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
391 
392 	time_limit_reached = qdf_time_sched_clock() >
393 					ce_state->ce_service_yield_time ? 1 : 0;
394 
395 	if (!time_limit_reached)
396 		rxpkt_thresh_reached = hif_max_num_receives_reached
397 					(scn, ce_state->receive_count);
398 
399 	/* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
400 	 * beyond MAX_NUM_OF_RECEIVES for NAPI backet calculation issue. This
401 	 * can happen in fast path handling as processing is happening in
402 	 * batches.
403 	 */
404 	if (rxpkt_thresh_reached)
405 		ce_state->receive_count = MAX_NUM_OF_RECEIVES;
406 
407 	yield =  time_limit_reached || rxpkt_thresh_reached;
408 
409 	if (yield &&
410 	    ce_state->htt_rx_data &&
411 	    hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
412 		hif_napi_update_yield_stats(ce_state,
413 					    time_limit_reached,
414 					    rxpkt_thresh_reached);
415 	}
416 
417 	return yield;
418 }
419 qdf_export_symbol(hif_ce_service_should_yield);
420 #endif
421 
ce_flush_tx_ring_write_idx(struct CE_handle * ce_tx_hdl,bool force_flush)422 void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush)
423 {
424 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
425 	struct CE_ring_state *src_ring = ce_state->src_ring;
426 	struct hif_softc *scn = ce_state->scn;
427 
428 	if (force_flush)
429 		ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT);
430 
431 	if (ce_ring_get_clear_event(src_ring, CE_RING_FLUSH_EVENT)) {
432 		qdf_spin_lock_bh(&ce_state->ce_index_lock);
433 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
434 					  src_ring->write_index);
435 		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
436 
437 		src_ring->last_flush_ts = qdf_get_log_timestamp();
438 		hif_debug("flushed");
439 	}
440 }
441 
442 /* Make sure this wrapper is called under ce_index_lock */
ce_tx_ring_write_idx_update_wrapper(struct CE_handle * ce_tx_hdl,int coalesce)443 void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
444 					 int coalesce)
445 {
446 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
447 	struct CE_ring_state *src_ring = ce_state->src_ring;
448 	struct hif_softc *scn = ce_state->scn;
449 
450 	if (!coalesce)
451 		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
452 					  src_ring->write_index);
453 }
454 
455 /*
456  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
457  * The caller takes responsibility for any needed locking.
458  */
459 
war_ce_src_ring_write_idx_set(struct hif_softc * scn,u32 ctrl_addr,unsigned int write_index)460 void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
461 				   u32 ctrl_addr, unsigned int write_index)
462 {
463 	if (hif_ce_war1) {
464 		void __iomem *indicator_addr;
465 
466 		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;
467 
468 		if (!war1_allow_sleep
469 		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
470 			hif_write32_mb(scn, indicator_addr,
471 				       (CDC_WAR_MAGIC_STR | write_index));
472 		} else {
473 			unsigned long irq_flags;
474 
475 			local_irq_save(irq_flags);
476 			hif_write32_mb(scn, indicator_addr, 1);
477 
478 			/*
479 			 * PCIE write waits for ACK in IPQ8K, there is no
480 			 * need to read back value.
481 			 */
482 			(void)hif_read32_mb(scn, indicator_addr);
483 			/* conservative */
484 			(void)hif_read32_mb(scn, indicator_addr);
485 
486 			CE_SRC_RING_WRITE_IDX_SET(scn,
487 						  ctrl_addr, write_index);
488 
489 			hif_write32_mb(scn, indicator_addr, 0);
490 			local_irq_restore(irq_flags);
491 		}
492 	} else {
493 		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
494 	}
495 }
496 
497 qdf_export_symbol(war_ce_src_ring_write_idx_set);
498 
499 QDF_STATUS
ce_send(struct CE_handle * copyeng,void * per_transfer_context,qdf_dma_addr_t buffer,uint32_t nbytes,uint32_t transfer_id,uint32_t flags,uint32_t user_flag)500 ce_send(struct CE_handle *copyeng,
501 		void *per_transfer_context,
502 		qdf_dma_addr_t buffer,
503 		uint32_t nbytes,
504 		uint32_t transfer_id,
505 		uint32_t flags,
506 		uint32_t user_flag)
507 {
508 	struct CE_state *CE_state = (struct CE_state *)copyeng;
509 	QDF_STATUS status;
510 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
511 
512 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
513 	status = hif_state->ce_services->ce_send_nolock(copyeng,
514 			per_transfer_context, buffer, nbytes,
515 			transfer_id, flags, user_flag);
516 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
517 
518 	return status;
519 }
520 qdf_export_symbol(ce_send);
521 
ce_sendlist_sizeof(void)522 unsigned int ce_sendlist_sizeof(void)
523 {
524 	return sizeof(struct ce_sendlist);
525 }
526 
ce_sendlist_init(struct ce_sendlist * sendlist)527 void ce_sendlist_init(struct ce_sendlist *sendlist)
528 {
529 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
530 
531 	sl->num_items = 0;
532 }
533 
534 QDF_STATUS
ce_sendlist_buf_add(struct ce_sendlist * sendlist,qdf_dma_addr_t buffer,uint32_t nbytes,uint32_t flags,uint32_t user_flags)535 ce_sendlist_buf_add(struct ce_sendlist *sendlist,
536 					qdf_dma_addr_t buffer,
537 					uint32_t nbytes,
538 					uint32_t flags,
539 					uint32_t user_flags)
540 {
541 	struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
542 	unsigned int num_items = sl->num_items;
543 	struct ce_sendlist_item *item;
544 
545 	if (num_items >= CE_SENDLIST_ITEMS_MAX) {
546 		QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
547 		return QDF_STATUS_E_RESOURCES;
548 	}
549 
550 	item = &sl->item[num_items];
551 	item->send_type = CE_SIMPLE_BUFFER_TYPE;
552 	item->data = buffer;
553 	item->u.nbytes = nbytes;
554 	item->flags = flags;
555 	item->user_flags = user_flags;
556 	sl->num_items = num_items + 1;
557 	return QDF_STATUS_SUCCESS;
558 }
559 
560 QDF_STATUS
ce_sendlist_send(struct CE_handle * copyeng,void * per_transfer_context,struct ce_sendlist * sendlist,unsigned int transfer_id)561 ce_sendlist_send(struct CE_handle *copyeng,
562 		 void *per_transfer_context,
563 		 struct ce_sendlist *sendlist, unsigned int transfer_id)
564 {
565 	struct CE_state *CE_state = (struct CE_state *)copyeng;
566 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
567 
568 	return hif_state->ce_services->ce_sendlist_send(copyeng,
569 			per_transfer_context, sendlist, transfer_id);
570 }
571 
572 #ifndef AH_NEED_TX_DATA_SWAP
573 #define AH_NEED_TX_DATA_SWAP 0
574 #endif
575 
576 /**
577  * ce_batch_send() - sends bunch of msdus at once
578  * @ce_tx_hdl : pointer to CE handle
579  * @msdu : list of msdus to be sent
580  * @transfer_id : transfer id
581  * @len : Downloaded length
582  * @sendhead : sendhead
583  *
584  * Assumption : Called with an array of MSDU's
585  * Function:
586  * For each msdu in the array
587  * 1. Send each msdu
588  * 2. Increment write index accordinlgy.
589  *
590  * Return: list of msds not sent
591  */
ce_batch_send(struct CE_handle * ce_tx_hdl,qdf_nbuf_t msdu,uint32_t transfer_id,u_int32_t len,uint32_t sendhead)592 qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,  qdf_nbuf_t msdu,
593 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
594 {
595 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
596 	struct hif_softc *scn = ce_state->scn;
597 	struct CE_ring_state *src_ring = ce_state->src_ring;
598 	u_int32_t ctrl_addr = ce_state->ctrl_addr;
599 	/*  A_target_id_t targid = TARGID(scn);*/
600 
601 	uint32_t nentries_mask = src_ring->nentries_mask;
602 	uint32_t sw_index, write_index;
603 
604 	struct CE_src_desc *src_desc_base =
605 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
606 	uint32_t *src_desc;
607 
608 	struct CE_src_desc lsrc_desc = {0};
609 	int deltacount = 0;
610 	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;
611 
612 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
613 	sw_index = src_ring->sw_index;
614 	write_index = src_ring->write_index;
615 
616 	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);
617 
618 	while (msdu) {
619 		tempnext = qdf_nbuf_next(msdu);
620 
621 		if (deltacount < 2) {
622 			if (sendhead)
623 				return msdu;
624 			hif_err("Out of descriptors");
625 			src_ring->write_index = write_index;
626 			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
627 					write_index);
628 
629 			sw_index = src_ring->sw_index;
630 			write_index = src_ring->write_index;
631 
632 			deltacount = CE_RING_DELTA(nentries_mask, write_index,
633 					sw_index-1);
634 			if (!freelist) {
635 				freelist = msdu;
636 				hfreelist = msdu;
637 			} else {
638 				qdf_nbuf_set_next(freelist, msdu);
639 				freelist = msdu;
640 			}
641 			qdf_nbuf_set_next(msdu, NULL);
642 			msdu = tempnext;
643 			continue;
644 		}
645 
646 		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
647 				write_index);
648 
649 		src_desc[0]   = qdf_nbuf_get_frag_paddr(msdu, 0);
650 
651 		lsrc_desc.meta_data = transfer_id;
652 		if (len  > msdu->len)
653 			len =  msdu->len;
654 		lsrc_desc.nbytes = len;
655 		/*  Data packet is a byte stream, so disable byte swap */
656 		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
657 		lsrc_desc.gather    = 0; /*For the last one, gather is not set*/
658 
659 		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
660 
661 
662 		src_ring->per_transfer_context[write_index] = msdu;
663 		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
664 
665 		if (sendhead)
666 			break;
667 		qdf_nbuf_set_next(msdu, NULL);
668 		msdu = tempnext;
669 
670 	}
671 
672 
673 	src_ring->write_index = write_index;
674 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
675 
676 	return hfreelist;
677 }
678 
679 /**
680  * ce_update_tx_ring() - Advance sw index.
681  * @ce_tx_hdl : pointer to CE handle
682  * @num_htt_cmpls : htt completions received.
683  *
684  * Function:
685  * Increment the value of sw index of src ring
686  * according to number of htt completions
687  * received.
688  *
689  * Return: void
690  */
691 #ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
ce_update_tx_ring(struct CE_handle * ce_tx_hdl,uint32_t num_htt_cmpls)692 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
693 {
694 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
695 	struct CE_ring_state *src_ring = ce_state->src_ring;
696 	uint32_t nentries_mask = src_ring->nentries_mask;
697 	/*
698 	 * Advance the s/w index:
699 	 * This effectively simulates completing the CE ring descriptors
700 	 */
701 	src_ring->sw_index =
702 		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
703 				num_htt_cmpls);
704 }
705 #else
ce_update_tx_ring(struct CE_handle * ce_tx_hdl,uint32_t num_htt_cmpls)706 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
707 {}
708 #endif
709 
710 /**
711  * ce_send_single() - sends
712  * @ce_tx_hdl : pointer to CE handle
713  * @msdu : msdu to be sent
714  * @transfer_id : transfer id
715  * @len : Downloaded length
716  *
717  * Function:
718  * 1. Send one msdu
719  * 2. Increment write index of src ring accordinlgy.
720  *
721  * Return: QDF_STATUS: CE sent status
722  */
ce_send_single(struct CE_handle * ce_tx_hdl,qdf_nbuf_t msdu,uint32_t transfer_id,u_int32_t len)723 QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
724 			  uint32_t transfer_id, u_int32_t len)
725 {
726 	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
727 	struct hif_softc *scn = ce_state->scn;
728 	struct CE_ring_state *src_ring = ce_state->src_ring;
729 	uint32_t ctrl_addr = ce_state->ctrl_addr;
730 	/*A_target_id_t targid = TARGID(scn);*/
731 
732 	uint32_t nentries_mask = src_ring->nentries_mask;
733 	uint32_t sw_index, write_index;
734 
735 	struct CE_src_desc *src_desc_base =
736 		(struct CE_src_desc *)src_ring->base_addr_owner_space;
737 	uint32_t *src_desc;
738 
739 	struct CE_src_desc lsrc_desc = {0};
740 	enum hif_ce_event_type event_type;
741 
742 	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
743 	sw_index = src_ring->sw_index;
744 	write_index = src_ring->write_index;
745 
746 	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
747 					sw_index-1) < 1)) {
748 		hif_err("ce send fail %d %d %d", nentries_mask,
749 		       write_index, sw_index);
750 		return QDF_STATUS_E_RESOURCES;
751 	}
752 
753 	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);
754 
755 	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);
756 
757 	lsrc_desc.meta_data = transfer_id;
758 	lsrc_desc.nbytes = len;
759 	/*  Data packet is a byte stream, so disable byte swap */
760 	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
761 	lsrc_desc.gather    = 0; /* For the last one, gather is not set */
762 
763 	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];
764 
765 
766 	src_ring->per_transfer_context[write_index] = msdu;
767 
768 	if (((struct CE_src_desc *)src_desc)->gather)
769 		event_type = HIF_TX_GATHER_DESC_POST;
770 	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
771 		event_type = HIF_TX_DESC_SOFTWARE_POST;
772 	else
773 		event_type = HIF_TX_DESC_POST;
774 
775 	hif_record_ce_desc_event(scn, ce_state->id, event_type,
776 				(union ce_desc *)src_desc, msdu,
777 				write_index, len);
778 
779 	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
780 
781 	src_ring->write_index = write_index;
782 
783 	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);
784 
785 	return QDF_STATUS_SUCCESS;
786 }
787 
788 /**
789  * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
790  * @copyeng: copy engine handle
791  * @per_recv_context: virtual address of the nbuf
792  * @buffer: physical address of the nbuf
793  *
794  * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
795  */
796 QDF_STATUS
ce_recv_buf_enqueue(struct CE_handle * copyeng,void * per_recv_context,qdf_dma_addr_t buffer)797 ce_recv_buf_enqueue(struct CE_handle *copyeng,
798 		    void *per_recv_context, qdf_dma_addr_t buffer)
799 {
800 	struct CE_state *CE_state = (struct CE_state *)copyeng;
801 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
802 
803 	return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
804 			per_recv_context, buffer);
805 }
806 qdf_export_symbol(ce_recv_buf_enqueue);
807 
808 void
ce_send_watermarks_set(struct CE_handle * copyeng,unsigned int low_alert_nentries,unsigned int high_alert_nentries)809 ce_send_watermarks_set(struct CE_handle *copyeng,
810 		       unsigned int low_alert_nentries,
811 		       unsigned int high_alert_nentries)
812 {
813 	struct CE_state *CE_state = (struct CE_state *)copyeng;
814 	uint32_t ctrl_addr = CE_state->ctrl_addr;
815 	struct hif_softc *scn = CE_state->scn;
816 
817 	CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
818 	CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
819 }
820 
821 void
ce_recv_watermarks_set(struct CE_handle * copyeng,unsigned int low_alert_nentries,unsigned int high_alert_nentries)822 ce_recv_watermarks_set(struct CE_handle *copyeng,
823 		       unsigned int low_alert_nentries,
824 		       unsigned int high_alert_nentries)
825 {
826 	struct CE_state *CE_state = (struct CE_state *)copyeng;
827 	uint32_t ctrl_addr = CE_state->ctrl_addr;
828 	struct hif_softc *scn = CE_state->scn;
829 
830 	CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
831 				low_alert_nentries);
832 	CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
833 				high_alert_nentries);
834 }
835 
ce_send_entries_avail(struct CE_handle * copyeng)836 unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
837 {
838 	struct CE_state *CE_state = (struct CE_state *)copyeng;
839 	struct CE_ring_state *src_ring = CE_state->src_ring;
840 	unsigned int nentries_mask = src_ring->nentries_mask;
841 	unsigned int sw_index;
842 	unsigned int write_index;
843 
844 	qdf_spin_lock(&CE_state->ce_index_lock);
845 	sw_index = src_ring->sw_index;
846 	write_index = src_ring->write_index;
847 	qdf_spin_unlock(&CE_state->ce_index_lock);
848 
849 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
850 }
851 
ce_recv_entries_avail(struct CE_handle * copyeng)852 unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
853 {
854 	struct CE_state *CE_state = (struct CE_state *)copyeng;
855 	struct CE_ring_state *dest_ring = CE_state->dest_ring;
856 	unsigned int nentries_mask = dest_ring->nentries_mask;
857 	unsigned int sw_index;
858 	unsigned int write_index;
859 
860 	qdf_spin_lock(&CE_state->ce_index_lock);
861 	sw_index = dest_ring->sw_index;
862 	write_index = dest_ring->write_index;
863 	qdf_spin_unlock(&CE_state->ce_index_lock);
864 
865 	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
866 }
867 
868 /*
869  * Guts of ce_completed_recv_next.
870  * The caller takes responsibility for any necessary locking.
871  */
872 QDF_STATUS
ce_completed_recv_next(struct CE_handle * copyeng,void ** per_CE_contextp,void ** per_transfer_contextp,qdf_dma_addr_t * bufferp,unsigned int * nbytesp,unsigned int * transfer_idp,unsigned int * flagsp)873 ce_completed_recv_next(struct CE_handle *copyeng,
874 		       void **per_CE_contextp,
875 		       void **per_transfer_contextp,
876 		       qdf_dma_addr_t *bufferp,
877 		       unsigned int *nbytesp,
878 		       unsigned int *transfer_idp, unsigned int *flagsp)
879 {
880 	struct CE_state *CE_state = (struct CE_state *)copyeng;
881 	QDF_STATUS status;
882 	struct hif_softc *scn = CE_state->scn;
883 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
884 	struct ce_ops *ce_services;
885 
886 	ce_services = hif_state->ce_services;
887 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
888 	status =
889 		ce_services->ce_completed_recv_next_nolock(CE_state,
890 				per_CE_contextp, per_transfer_contextp, bufferp,
891 					      nbytesp, transfer_idp, flagsp);
892 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
893 
894 	return status;
895 }
896 
897 QDF_STATUS
ce_revoke_recv_next(struct CE_handle * copyeng,void ** per_CE_contextp,void ** per_transfer_contextp,qdf_dma_addr_t * bufferp)898 ce_revoke_recv_next(struct CE_handle *copyeng,
899 		    void **per_CE_contextp,
900 		    void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
901 {
902 	struct CE_state *CE_state = (struct CE_state *)copyeng;
903 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
904 
905 	return hif_state->ce_services->ce_revoke_recv_next(copyeng,
906 			per_CE_contextp, per_transfer_contextp, bufferp);
907 }
908 
909 QDF_STATUS
ce_cancel_send_next(struct CE_handle * copyeng,void ** per_CE_contextp,void ** per_transfer_contextp,qdf_dma_addr_t * bufferp,unsigned int * nbytesp,unsigned int * transfer_idp,uint32_t * toeplitz_hash_result)910 ce_cancel_send_next(struct CE_handle *copyeng,
911 		void **per_CE_contextp,
912 		void **per_transfer_contextp,
913 		qdf_dma_addr_t *bufferp,
914 		unsigned int *nbytesp,
915 		unsigned int *transfer_idp,
916 		uint32_t *toeplitz_hash_result)
917 {
918 	struct CE_state *CE_state = (struct CE_state *)copyeng;
919 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
920 
921 	return hif_state->ce_services->ce_cancel_send_next
922 		(copyeng, per_CE_contextp, per_transfer_contextp,
923 		 bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
924 }
925 qdf_export_symbol(ce_cancel_send_next);
926 
927 QDF_STATUS
ce_completed_send_next(struct CE_handle * copyeng,void ** per_CE_contextp,void ** per_transfer_contextp,qdf_dma_addr_t * bufferp,unsigned int * nbytesp,unsigned int * transfer_idp,unsigned int * sw_idx,unsigned int * hw_idx,unsigned int * toeplitz_hash_result)928 ce_completed_send_next(struct CE_handle *copyeng,
929 		       void **per_CE_contextp,
930 		       void **per_transfer_contextp,
931 		       qdf_dma_addr_t *bufferp,
932 		       unsigned int *nbytesp,
933 		       unsigned int *transfer_idp,
934 		       unsigned int *sw_idx,
935 		       unsigned int *hw_idx,
936 		       unsigned int *toeplitz_hash_result)
937 {
938 	struct CE_state *CE_state = (struct CE_state *)copyeng;
939 	struct hif_softc *scn = CE_state->scn;
940 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
941 	struct ce_ops *ce_services;
942 	QDF_STATUS status;
943 
944 	ce_services = hif_state->ce_services;
945 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
946 	status =
947 		ce_services->ce_completed_send_next_nolock(CE_state,
948 					per_CE_contextp, per_transfer_contextp,
949 					bufferp, nbytesp, transfer_idp, sw_idx,
950 					      hw_idx, toeplitz_hash_result);
951 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
952 
953 	return status;
954 }
955 
956 #ifdef ATH_11AC_TXCOMPACT
957 /* CE engine descriptor reap
958  * Similar to ce_per_engine_service , Only difference is ce_per_engine_service
959  * does receive and reaping of completed descriptor ,
960  * This function only handles reaping of Tx complete descriptor.
961  * The Function is called from threshold reap  poll routine
962  * hif_send_complete_check so should not contain receive functionality
963  * within it .
964  */
965 
ce_per_engine_servicereap(struct hif_softc * scn,unsigned int ce_id)966 void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
967 {
968 	void *CE_context;
969 	void *transfer_context;
970 	qdf_dma_addr_t buf;
971 	unsigned int nbytes;
972 	unsigned int id;
973 	unsigned int sw_idx, hw_idx;
974 	uint32_t toeplitz_hash_result;
975 	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
976 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
977 
978 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
979 		return;
980 
981 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
982 			NULL, NULL, 0, 0);
983 
984 	/* Since this function is called from both user context and
985 	 * tasklet context the spinlock has to lock the bottom halves.
986 	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
987 	 * enabled in TX polling mode. If this is not the case, more
988 	 * bottom halve spin lock changes are needed. Due to data path
989 	 * performance concern, after internal discussion we've decided
990 	 * to make minimum change, i.e., only address the issue occurred
991 	 * in this function. The possible negative effect of this minimum
992 	 * change is that, in the future, if some other function will also
993 	 * be opened to let the user context to use, those cases need to be
994 	 * addressed by change spin_lock to spin_lock_bh also.
995 	 */
996 
997 	qdf_spin_lock_bh(&CE_state->ce_index_lock);
998 
999 	if (CE_state->send_cb) {
1000 		{
1001 			struct ce_ops *ce_services = hif_state->ce_services;
1002 			/* Pop completed send buffers and call the
1003 			 * registered send callback for each
1004 			 */
1005 			while (ce_services->ce_completed_send_next_nolock
1006 				 (CE_state, &CE_context,
1007 				  &transfer_context, &buf,
1008 				  &nbytes, &id, &sw_idx, &hw_idx,
1009 				  &toeplitz_hash_result) ==
1010 				  QDF_STATUS_SUCCESS) {
1011 				if (ce_id != CE_HTT_H2T_MSG) {
1012 					qdf_spin_unlock_bh(
1013 						&CE_state->ce_index_lock);
1014 					CE_state->send_cb(
1015 						(struct CE_handle *)
1016 						CE_state, CE_context,
1017 						transfer_context, buf,
1018 						nbytes, id, sw_idx, hw_idx,
1019 						toeplitz_hash_result);
1020 					qdf_spin_lock_bh(
1021 						&CE_state->ce_index_lock);
1022 				} else {
1023 					struct HIF_CE_pipe_info *pipe_info =
1024 						(struct HIF_CE_pipe_info *)
1025 						CE_context;
1026 
1027 					qdf_spin_lock_bh(&pipe_info->
1028 						 completion_freeq_lock);
1029 					pipe_info->num_sends_allowed++;
1030 					qdf_spin_unlock_bh(&pipe_info->
1031 						   completion_freeq_lock);
1032 				}
1033 			}
1034 		}
1035 	}
1036 
1037 	qdf_spin_unlock_bh(&CE_state->ce_index_lock);
1038 
1039 	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
1040 			NULL, NULL, 0, 0);
1041 	Q_TARGET_ACCESS_END(scn);
1042 }
1043 
1044 #endif /*ATH_11AC_TXCOMPACT */
1045 
1046 #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
check_ce_id_and_epping_enabled(int CE_id,uint32_t mode)1047 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
1048 {
1049 	// QDF_IS_EPPING_ENABLED is pre lithium feature
1050 	// CE4 completion is enabled only lithium and later
1051 	// so no need to check for EPPING
1052 	return true;
1053 }
1054 
1055 #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1056 
check_ce_id_and_epping_enabled(int CE_id,uint32_t mode)1057 static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
1058 {
1059 	if (CE_id != CE_HTT_H2T_MSG || QDF_IS_EPPING_ENABLED(mode))
1060 		return true;
1061 	else
1062 		return false;
1063 }
1064 
1065 #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1066 
1067 /*
1068  * ce_engine_service_reg:
1069  *
1070  * Called from ce_per_engine_service and goes through the regular interrupt
1071  * handling that does not involve the WLAN fast path feature.
1072  *
1073  * Returns void
1074  */
ce_engine_service_reg(struct hif_softc * scn,int CE_id)1075 void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
1076 {
1077 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1078 	uint32_t ctrl_addr = CE_state->ctrl_addr;
1079 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1080 	void *CE_context;
1081 	void *transfer_context;
1082 	qdf_dma_addr_t buf;
1083 	unsigned int nbytes;
1084 	unsigned int id;
1085 	unsigned int flags;
1086 	unsigned int more_comp_cnt = 0;
1087 	unsigned int more_snd_comp_cnt = 0;
1088 	unsigned int sw_idx, hw_idx;
1089 	uint32_t toeplitz_hash_result;
1090 	uint32_t mode = hif_get_conparam(scn);
1091 
1092 more_completions:
1093 	if (CE_state->recv_cb) {
1094 
1095 		/* Pop completed recv buffers and call
1096 		 * the registered recv callback for each
1097 		 */
1098 		while (hif_state->ce_services->ce_completed_recv_next_nolock
1099 				(CE_state, &CE_context, &transfer_context,
1100 				&buf, &nbytes, &id, &flags) ==
1101 				QDF_STATUS_SUCCESS) {
1102 			qdf_spin_unlock(&CE_state->ce_index_lock);
1103 			CE_state->recv_cb((struct CE_handle *)CE_state,
1104 					  CE_context, transfer_context, buf,
1105 					  nbytes, id, flags);
1106 
1107 			qdf_spin_lock(&CE_state->ce_index_lock);
1108 			/*
1109 			 * EV #112693 -
1110 			 * [Peregrine][ES1][WB342][Win8x86][Performance]
1111 			 * BSoD_0x133 occurred in VHT80 UDP_DL
1112 			 * Break out DPC by force if number of loops in
1113 			 * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
1114 			 * to avoid spending too long time in
1115 			 * DPC for each interrupt handling. Schedule another
1116 			 * DPC to avoid data loss if we had taken
1117 			 * force-break action before apply to Windows OS
1118 			 * only currently, Linux/MAC os can expand to their
1119 			 * platform if necessary
1120 			 */
1121 
1122 			/* Break the receive processes by
1123 			 * force if force_break set up
1124 			 */
1125 			if (qdf_unlikely(CE_state->force_break)) {
1126 				qdf_atomic_set(&CE_state->rx_pending, 1);
1127 				return;
1128 			}
1129 		}
1130 	}
1131 
1132 	/*
1133 	 * Attention: We may experience potential infinite loop for below
1134 	 * While Loop during Sending Stress test.
1135 	 * Resolve the same way as Receive Case (Refer to EV #112693)
1136 	 */
1137 
1138 	if (CE_state->send_cb) {
1139 		/* Pop completed send buffers and call
1140 		 * the registered send callback for each
1141 		 */
1142 
1143 #ifdef ATH_11AC_TXCOMPACT
1144 		while (hif_state->ce_services->ce_completed_send_next_nolock
1145 			 (CE_state, &CE_context,
1146 			 &transfer_context, &buf, &nbytes,
1147 			 &id, &sw_idx, &hw_idx,
1148 			 &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1149 
1150 			if (check_ce_id_and_epping_enabled(CE_id, mode)) {
1151 				qdf_spin_unlock(&CE_state->ce_index_lock);
1152 				CE_state->send_cb((struct CE_handle *)CE_state,
1153 						  CE_context, transfer_context,
1154 						  buf, nbytes, id, sw_idx,
1155 						  hw_idx, toeplitz_hash_result);
1156 				qdf_spin_lock(&CE_state->ce_index_lock);
1157 			} else {
1158 				struct HIF_CE_pipe_info *pipe_info =
1159 					(struct HIF_CE_pipe_info *)CE_context;
1160 
1161 				qdf_spin_lock_bh(&pipe_info->
1162 					      completion_freeq_lock);
1163 				pipe_info->num_sends_allowed++;
1164 				qdf_spin_unlock_bh(&pipe_info->
1165 						completion_freeq_lock);
1166 			}
1167 		}
1168 #else                           /*ATH_11AC_TXCOMPACT */
1169 		while (hif_state->ce_services->ce_completed_send_next_nolock
1170 			 (CE_state, &CE_context,
1171 			  &transfer_context, &buf, &nbytes,
1172 			  &id, &sw_idx, &hw_idx,
1173 			  &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1174 			qdf_spin_unlock(&CE_state->ce_index_lock);
1175 			CE_state->send_cb((struct CE_handle *)CE_state,
1176 				  CE_context, transfer_context, buf,
1177 				  nbytes, id, sw_idx, hw_idx,
1178 				  toeplitz_hash_result);
1179 			qdf_spin_lock(&CE_state->ce_index_lock);
1180 		}
1181 #endif /*ATH_11AC_TXCOMPACT */
1182 	}
1183 
1184 more_watermarks:
1185 	if (CE_state->misc_cbs) {
1186 		if (CE_state->watermark_cb &&
1187 				hif_state->ce_services->watermark_int(CE_state,
1188 					&flags)) {
1189 			qdf_spin_unlock(&CE_state->ce_index_lock);
1190 			/* Convert HW IS bits to software flags */
1191 			CE_state->watermark_cb((struct CE_handle *)CE_state,
1192 					CE_state->wm_context, flags);
1193 			qdf_spin_lock(&CE_state->ce_index_lock);
1194 		}
1195 	}
1196 
1197 	/*
1198 	 * Clear the misc interrupts (watermark) that were handled above,
1199 	 * and that will be checked again below.
1200 	 * Clear and check for copy-complete interrupts again, just in case
1201 	 * more copy completions happened while the misc interrupts were being
1202 	 * handled.
1203 	 */
1204 	if (!ce_srng_based(scn) && !CE_state->msi_supported) {
1205 		if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1206 			CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1207 					   CE_WATERMARK_MASK |
1208 					   HOST_IS_COPY_COMPLETE_MASK);
1209 		} else {
1210 			qdf_atomic_set(&CE_state->rx_pending, 0);
1211 			hif_err_rl("%s: target access is not allowed",
1212 				   __func__);
1213 			return;
1214 		}
1215 	}
1216 
1217 	/*
1218 	 * Now that per-engine interrupts are cleared, verify that
1219 	 * no recv interrupts arrive while processing send interrupts,
1220 	 * and no recv or send interrupts happened while processing
1221 	 * misc interrupts.Go back and check again.Keep checking until
1222 	 * we find no more events to process.
1223 	 */
1224 	if (CE_state->recv_cb &&
1225 		hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1226 				CE_state)) {
1227 		if (QDF_IS_EPPING_ENABLED(mode) ||
1228 		    more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1229 			goto more_completions;
1230 		} else {
1231 			if (!ce_srng_based(scn) &&
1232 			    !CE_state->batch_intr_supported) {
1233 				hif_err_rl(
1234 					"Potential infinite loop detected during Rx processing id:%u nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1235 					CE_state->id,
1236 					CE_state->dest_ring->nentries_mask,
1237 					CE_state->dest_ring->sw_index,
1238 					CE_DEST_RING_READ_IDX_GET(scn,
1239 							  CE_state->ctrl_addr));
1240 			}
1241 		}
1242 	}
1243 
1244 	if (CE_state->send_cb &&
1245 		hif_state->ce_services->ce_send_entries_done_nolock(scn,
1246 				CE_state)) {
1247 		if (QDF_IS_EPPING_ENABLED(mode) ||
1248 		    more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1249 			goto more_completions;
1250 		} else {
1251 			if (!ce_srng_based(scn) &&
1252 			    !CE_state->batch_intr_supported) {
1253 				hif_err_rl(
1254 					"Potential infinite loop detected during send completion id:%u mask:0x%x sw read_idx:0x%x hw_index:0x%x write_index: 0x%x hw read_idx:0x%x",
1255 					CE_state->id,
1256 					CE_state->src_ring->nentries_mask,
1257 					CE_state->src_ring->sw_index,
1258 					CE_state->src_ring->hw_index,
1259 					CE_state->src_ring->write_index,
1260 					CE_SRC_RING_READ_IDX_GET(scn,
1261 							 CE_state->ctrl_addr));
1262 			}
1263 		}
1264 	}
1265 
1266 	if (CE_state->misc_cbs && CE_state->watermark_cb) {
1267 		if (hif_state->ce_services->watermark_int(CE_state, &flags))
1268 			goto more_watermarks;
1269 	}
1270 
1271 	qdf_atomic_set(&CE_state->rx_pending, 0);
1272 }
1273 
1274 #ifdef WLAN_TRACEPOINTS
1275 /**
1276  * ce_trace_tasklet_sched_latency() - Trace ce tasklet scheduling
1277  *  latency
1278  * @ce_state: CE context
1279  *
1280  * Return: None
1281  */
1282 static inline
ce_trace_tasklet_sched_latency(struct CE_state * ce_state)1283 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
1284 {
1285 	qdf_trace_dp_ce_tasklet_sched_latency(ce_state->id,
1286 					      ce_state->ce_service_start_time -
1287 					      ce_state->ce_tasklet_sched_time);
1288 }
1289 #else
1290 static inline
ce_trace_tasklet_sched_latency(struct CE_state * ce_state)1291 void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
1292 {
1293 }
1294 #endif
1295 
1296 /*
1297  * Guts of interrupt handler for per-engine interrupts on a particular CE.
1298  *
1299  * Invokes registered callbacks for recv_complete,
1300  * send_complete, and watermarks.
1301  *
1302  * Returns: number of messages processed
1303  */
ce_per_engine_service(struct hif_softc * scn,unsigned int CE_id)1304 int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
1305 {
1306 	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1307 
1308 	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
1309 		return CE_state->receive_count;
1310 
1311 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
1312 		hif_err("[premature rc=0]");
1313 		return 0; /* no work done */
1314 	}
1315 
1316 	/* Clear force_break flag and re-initialize receive_count to 0 */
1317 	CE_state->receive_count = 0;
1318 	CE_state->force_break = 0;
1319 	CE_state->ce_service_start_time = qdf_time_sched_clock();
1320 	CE_state->ce_service_yield_time =
1321 		CE_state->ce_service_start_time +
1322 		hif_get_ce_service_max_yield_time(
1323 			(struct hif_opaque_softc *)scn);
1324 
1325 	ce_trace_tasklet_sched_latency(CE_state);
1326 
1327 	qdf_spin_lock(&CE_state->ce_index_lock);
1328 
1329 	CE_state->service(scn, CE_id);
1330 
1331 	qdf_spin_unlock(&CE_state->ce_index_lock);
1332 
1333 	if (Q_TARGET_ACCESS_END(scn) < 0)
1334 		hif_err("<--[premature rc=%d]", CE_state->receive_count);
1335 	return CE_state->receive_count;
1336 }
1337 qdf_export_symbol(ce_per_engine_service);
1338 
1339 /*
1340  * Handler for per-engine interrupts on ALL active CEs.
1341  * This is used in cases where the system is sharing a
1342  * single interrupt for all CEs
1343  */
1344 
ce_per_engine_service_any(int irq,struct hif_softc * scn)1345 void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1346 {
1347 	int CE_id;
1348 	uint32_t intr_summary;
1349 
1350 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1351 		return;
1352 
1353 	if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1354 		for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1355 			struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1356 
1357 			if (qdf_atomic_read(&CE_state->rx_pending)) {
1358 				qdf_atomic_set(&CE_state->rx_pending, 0);
1359 				ce_per_engine_service(scn, CE_id);
1360 			}
1361 		}
1362 
1363 		Q_TARGET_ACCESS_END(scn);
1364 		return;
1365 	}
1366 
1367 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
1368 
1369 	for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1370 		if (intr_summary & (1 << CE_id))
1371 			intr_summary &= ~(1 << CE_id);
1372 		else
1373 			continue;       /* no intr pending on this CE */
1374 
1375 		ce_per_engine_service(scn, CE_id);
1376 	}
1377 
1378 	Q_TARGET_ACCESS_END(scn);
1379 }
1380 
1381 /*Iterate the CE_state list and disable the compl interrupt
1382  * if it has been registered already.
1383  */
ce_disable_any_copy_compl_intr_nolock(struct hif_softc * scn)1384 void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1385 {
1386 	int CE_id;
1387 
1388 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1389 		return;
1390 
1391 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1392 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1393 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1394 
1395 		/* if the interrupt is currently enabled, disable it */
1396 		if (!CE_state->disable_copy_compl_intr
1397 		    && (CE_state->send_cb || CE_state->recv_cb))
1398 			CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1399 
1400 		if (CE_state->watermark_cb)
1401 			CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1402 	}
1403 	Q_TARGET_ACCESS_END(scn);
1404 }
1405 
ce_enable_any_copy_compl_intr_nolock(struct hif_softc * scn)1406 void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1407 {
1408 	int CE_id;
1409 
1410 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1411 		return;
1412 
1413 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1414 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1415 		uint32_t ctrl_addr = CE_state->ctrl_addr;
1416 
1417 		/*
1418 		 * If the CE is supposed to have copy complete interrupts
1419 		 * enabled (i.e. there a callback registered, and the
1420 		 * "disable" flag is not set), then re-enable the interrupt.
1421 		 */
1422 		if (!CE_state->disable_copy_compl_intr
1423 		    && (CE_state->send_cb || CE_state->recv_cb))
1424 			CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1425 
1426 		if (CE_state->watermark_cb)
1427 			CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1428 	}
1429 	Q_TARGET_ACCESS_END(scn);
1430 }
1431 
1432 /**
1433  * ce_send_cb_register(): register completion handler
1434  * @copyeng: CE_state representing the ce we are adding the behavior to
1435  * @fn_ptr: callback that the ce should use when processing tx completions
1436  * @ce_send_context: context to pass back in the callback
1437  * @disable_interrupts: if the interrupts should be enabled or not.
1438  *
1439  * Caller should guarantee that no transactions are in progress before
1440  * switching the callback function.
1441  *
1442  * Registers the send context before the fn pointer so that if the cb is valid
1443  * the context should be valid.
1444  *
1445  * Beware that currently this function will enable completion interrupts.
1446  */
1447 void
ce_send_cb_register(struct CE_handle * copyeng,ce_send_cb fn_ptr,void * ce_send_context,int disable_interrupts)1448 ce_send_cb_register(struct CE_handle *copyeng,
1449 		    ce_send_cb fn_ptr,
1450 		    void *ce_send_context, int disable_interrupts)
1451 {
1452 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1453 	struct hif_softc *scn;
1454 	struct HIF_CE_state *hif_state;
1455 
1456 	if (!CE_state) {
1457 		hif_err("Error CE state = NULL");
1458 		return;
1459 	}
1460 	scn = CE_state->scn;
1461 	hif_state = HIF_GET_CE_STATE(scn);
1462 	if (!hif_state) {
1463 		hif_err("Error HIF state = NULL");
1464 		return;
1465 	}
1466 	CE_state->send_context = ce_send_context;
1467 	CE_state->send_cb = fn_ptr;
1468 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1469 							disable_interrupts);
1470 }
1471 qdf_export_symbol(ce_send_cb_register);
1472 
1473 /**
1474  * ce_recv_cb_register(): register completion handler
1475  * @copyeng: CE_state representing the ce we are adding the behavior to
1476  * @fn_ptr: callback that the ce should use when processing rx completions
1477  * @CE_recv_context: context to pass back in the callback
1478  * @disable_interrupts: if the interrupts should be enabled or not.
1479  *
1480  * Registers the send context before the fn pointer so that if the cb is valid
1481  * the context should be valid.
1482  *
1483  * Caller should guarantee that no transactions are in progress before
1484  * switching the callback function.
1485  */
1486 void
ce_recv_cb_register(struct CE_handle * copyeng,CE_recv_cb fn_ptr,void * CE_recv_context,int disable_interrupts)1487 ce_recv_cb_register(struct CE_handle *copyeng,
1488 		    CE_recv_cb fn_ptr,
1489 		    void *CE_recv_context, int disable_interrupts)
1490 {
1491 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1492 	struct hif_softc *scn;
1493 	struct HIF_CE_state *hif_state;
1494 
1495 	if (!CE_state) {
1496 		hif_err("ERROR CE state = NULL");
1497 		return;
1498 	}
1499 	scn = CE_state->scn;
1500 	hif_state = HIF_GET_CE_STATE(scn);
1501 	if (!hif_state) {
1502 		hif_err("Error HIF state = NULL");
1503 		return;
1504 	}
1505 	CE_state->recv_context = CE_recv_context;
1506 	CE_state->recv_cb = fn_ptr;
1507 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1508 							disable_interrupts);
1509 }
1510 qdf_export_symbol(ce_recv_cb_register);
1511 
1512 /**
1513  * ce_watermark_cb_register(): register completion handler
1514  * @copyeng: CE_state representing the ce we are adding the behavior to
1515  * @fn_ptr: callback that the ce should use when processing watermark events
1516  * @CE_wm_context: context to pass back in the callback
1517  *
1518  * Caller should guarantee that no watermark events are being processed before
1519  * switching the callback function.
1520  */
1521 void
ce_watermark_cb_register(struct CE_handle * copyeng,CE_watermark_cb fn_ptr,void * CE_wm_context)1522 ce_watermark_cb_register(struct CE_handle *copyeng,
1523 			 CE_watermark_cb fn_ptr, void *CE_wm_context)
1524 {
1525 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1526 	struct hif_softc *scn = CE_state->scn;
1527 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1528 
1529 	CE_state->watermark_cb = fn_ptr;
1530 	CE_state->wm_context = CE_wm_context;
1531 	hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1532 							0);
1533 	if (fn_ptr)
1534 		CE_state->misc_cbs = 1;
1535 }
1536 
1537 #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
1538 void
ce_register_custom_cb(struct CE_handle * copyeng,void (* custom_cb)(void *),void * custom_cb_context)1539 ce_register_custom_cb(struct CE_handle *copyeng, void (*custom_cb)(void *),
1540 		      void *custom_cb_context)
1541 {
1542 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1543 
1544 	CE_state->custom_cb = custom_cb;
1545 	CE_state->custom_cb_context = custom_cb_context;
1546 	qdf_atomic_init(&CE_state->custom_cb_pending);
1547 }
1548 
1549 void
ce_unregister_custom_cb(struct CE_handle * copyeng)1550 ce_unregister_custom_cb(struct CE_handle *copyeng)
1551 {
1552 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1553 
1554 	qdf_assert_always(!qdf_atomic_read(&CE_state->custom_cb_pending));
1555 	CE_state->custom_cb = NULL;
1556 	CE_state->custom_cb_context = NULL;
1557 }
1558 
1559 void
ce_enable_custom_cb(struct CE_handle * copyeng)1560 ce_enable_custom_cb(struct CE_handle *copyeng)
1561 {
1562 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1563 	int32_t custom_cb_pending;
1564 
1565 	qdf_assert_always(CE_state->custom_cb);
1566 	qdf_assert_always(CE_state->custom_cb_context);
1567 
1568 	custom_cb_pending = qdf_atomic_inc_return(&CE_state->custom_cb_pending);
1569 	qdf_assert_always(custom_cb_pending >= 1);
1570 }
1571 
1572 void
ce_disable_custom_cb(struct CE_handle * copyeng)1573 ce_disable_custom_cb(struct CE_handle *copyeng)
1574 {
1575 	struct CE_state *CE_state = (struct CE_state *)copyeng;
1576 
1577 	qdf_assert_always(CE_state->custom_cb);
1578 	qdf_assert_always(CE_state->custom_cb_context);
1579 
1580 	qdf_atomic_dec_if_positive(&CE_state->custom_cb_pending);
1581 }
1582 #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
1583 
ce_get_rx_pending(struct hif_softc * scn)1584 bool ce_get_rx_pending(struct hif_softc *scn)
1585 {
1586 	int CE_id;
1587 
1588 	for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1589 		struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1590 
1591 		if (qdf_atomic_read(&CE_state->rx_pending))
1592 			return true;
1593 	}
1594 
1595 	return false;
1596 }
1597 
1598 /**
1599  * ce_check_rx_pending() - ce_check_rx_pending
1600  * @CE_state: context of the copy engine to check
1601  *
1602  * Return: true if there per_engine_service
1603  *	didn't process all the rx descriptors.
1604  */
ce_check_rx_pending(struct CE_state * CE_state)1605 bool ce_check_rx_pending(struct CE_state *CE_state)
1606 {
1607 	if (qdf_atomic_read(&CE_state->rx_pending))
1608 		return true;
1609 	else
1610 		return false;
1611 }
1612 qdf_export_symbol(ce_check_rx_pending);
1613 
1614 #ifdef IPA_OFFLOAD
1615 #ifdef QCN7605_SUPPORT
ce_ipa_get_wr_index_addr(struct CE_state * CE_state)1616 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1617 {
1618 	u_int32_t ctrl_addr = CE_state->ctrl_addr;
1619 	struct hif_softc *scn = CE_state->scn;
1620 	qdf_dma_addr_t wr_index_addr;
1621 
1622 	wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
1623 	return wr_index_addr;
1624 }
1625 #else
ce_ipa_get_wr_index_addr(struct CE_state * CE_state)1626 static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1627 {
1628 	struct hif_softc *scn = CE_state->scn;
1629 	qdf_dma_addr_t wr_index_addr;
1630 
1631 	wr_index_addr = CE_BASE_ADDRESS(CE_state->id) +
1632 			SR_WR_INDEX_ADDRESS;
1633 	return wr_index_addr;
1634 }
1635 #endif
1636 
1637 /**
1638  * ce_ipa_get_resource() - get uc resource on copyengine
1639  * @ce: copyengine context
1640  * @ce_sr: copyengine source ring resource info
1641  * @ce_sr_ring_size: copyengine source ring size
1642  * @ce_reg_paddr: copyengine register physical address
1643  *
1644  * Copy engine should release resource to micro controller
1645  * Micro controller needs
1646  *  - Copy engine source descriptor base address
1647  *  - Copy engine source descriptor size
1648  *  - PCI BAR address to access copy engine register
1649  *
1650  * Return: None
1651  */
ce_ipa_get_resource(struct CE_handle * ce,qdf_shared_mem_t ** ce_sr,uint32_t * ce_sr_ring_size,qdf_dma_addr_t * ce_reg_paddr)1652 void ce_ipa_get_resource(struct CE_handle *ce,
1653 			 qdf_shared_mem_t **ce_sr,
1654 			 uint32_t *ce_sr_ring_size,
1655 			 qdf_dma_addr_t *ce_reg_paddr)
1656 {
1657 	struct CE_state *CE_state = (struct CE_state *)ce;
1658 	uint32_t ring_loop;
1659 	struct CE_src_desc *ce_desc;
1660 	qdf_dma_addr_t phy_mem_base;
1661 	struct hif_softc *scn = CE_state->scn;
1662 
1663 	if (CE_UNUSED == CE_state->state) {
1664 		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
1665 			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
1666 		*ce_sr_ring_size = 0;
1667 		return;
1668 	}
1669 
1670 	/* Update default value for descriptor */
1671 	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
1672 	     ring_loop++) {
1673 		ce_desc = (struct CE_src_desc *)
1674 			  ((char *)CE_state->src_ring->base_addr_owner_space +
1675 			   ring_loop * (sizeof(struct CE_src_desc)));
1676 		CE_IPA_RING_INIT(ce_desc);
1677 	}
1678 
1679 	/* Get BAR address */
1680 	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);
1681 
1682 	*ce_sr = CE_state->scn->ipa_ce_ring;
1683 	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
1684 		sizeof(struct CE_src_desc));
1685 	*ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);
1686 
1687 }
1688 
1689 #endif /* IPA_OFFLOAD */
1690 
1691 #ifdef HIF_CE_DEBUG_DATA_BUF
1692 /**
1693  * hif_dump_desc_data_buf() - record ce descriptor events
1694  * @buf: buffer to copy to
1695  * @pos: Current position till which the buf is filled
1696  * @data: Data to be copied
1697  * @data_len: Length of the data to be copied
1698  */
hif_dump_desc_data_buf(uint8_t * buf,ssize_t pos,uint8_t * data,uint32_t data_len)1699 static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
1700 					uint8_t *data, uint32_t data_len)
1701 {
1702 	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
1703 			CE_DEBUG_MAX_DATA_BUF_SIZE);
1704 
1705 	if ((data_len > 0) && data) {
1706 		if (data_len < 16) {
1707 			hex_dump_to_buffer(data,
1708 						CE_DEBUG_DATA_PER_ROW,
1709 						16, 1, buf + pos,
1710 						(ssize_t)PAGE_SIZE - pos,
1711 						false);
1712 			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
1713 			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
1714 		} else {
1715 			uint32_t rows = (data_len / 16) + 1;
1716 			uint32_t row = 0;
1717 
1718 			for (row = 0; row < rows; row++) {
1719 				hex_dump_to_buffer(data + (row * 16),
1720 							CE_DEBUG_DATA_PER_ROW,
1721 							16, 1, buf + pos,
1722 							(ssize_t)PAGE_SIZE
1723 							- pos, false);
1724 				pos +=
1725 				CE_DEBUG_PRINT_BUF_SIZE(CE_DEBUG_DATA_PER_ROW);
1726 				pos += snprintf(buf + pos, PAGE_SIZE - pos,
1727 						"\n");
1728 			}
1729 		}
1730 	}
1731 
1732 	return pos;
1733 }
1734 #endif
1735 
1736 /*
1737  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1738  * for defined here
1739  */
1740 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
ce_event_type_to_str(enum hif_ce_event_type type)1741 static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1742 {
1743 	switch (type) {
1744 	case HIF_RX_DESC_POST:
1745 		return "HIF_RX_DESC_POST";
1746 	case HIF_RX_DESC_COMPLETION:
1747 		return "HIF_RX_DESC_COMPLETION";
1748 	case HIF_TX_GATHER_DESC_POST:
1749 		return "HIF_TX_GATHER_DESC_POST";
1750 	case HIF_TX_DESC_POST:
1751 		return "HIF_TX_DESC_POST";
1752 	case HIF_TX_DESC_SOFTWARE_POST:
1753 		return "HIF_TX_DESC_SOFTWARE_POST";
1754 	case HIF_TX_DESC_COMPLETION:
1755 		return "HIF_TX_DESC_COMPLETION";
1756 	case FAST_RX_WRITE_INDEX_UPDATE:
1757 		return "FAST_RX_WRITE_INDEX_UPDATE";
1758 	case FAST_RX_SOFTWARE_INDEX_UPDATE:
1759 		return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1760 	case FAST_TX_WRITE_INDEX_UPDATE:
1761 		return "FAST_TX_WRITE_INDEX_UPDATE";
1762 	case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1763 		return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1764 	case FAST_TX_SOFTWARE_INDEX_UPDATE:
1765 		return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1766 	case RESUME_WRITE_INDEX_UPDATE:
1767 		return "RESUME_WRITE_INDEX_UPDATE";
1768 	case HIF_IRQ_EVENT:
1769 		return "HIF_IRQ_EVENT";
1770 	case HIF_CE_TASKLET_ENTRY:
1771 		return "HIF_CE_TASKLET_ENTRY";
1772 	case HIF_CE_TASKLET_RESCHEDULE:
1773 		return "HIF_CE_TASKLET_RESCHEDULE";
1774 	case HIF_CE_TASKLET_EXIT:
1775 		return "HIF_CE_TASKLET_EXIT";
1776 	case HIF_CE_REAP_ENTRY:
1777 		return "HIF_CE_REAP_ENTRY";
1778 	case HIF_CE_REAP_EXIT:
1779 		return "HIF_CE_REAP_EXIT";
1780 	case NAPI_SCHEDULE:
1781 		return "NAPI_SCHEDULE";
1782 	case NAPI_POLL_ENTER:
1783 		return "NAPI_POLL_ENTER";
1784 	case NAPI_COMPLETE:
1785 		return "NAPI_COMPLETE";
1786 	case NAPI_POLL_EXIT:
1787 		return "NAPI_POLL_EXIT";
1788 	case HIF_RX_NBUF_ALLOC_FAILURE:
1789 		return "HIF_RX_NBUF_ALLOC_FAILURE";
1790 	case HIF_RX_NBUF_MAP_FAILURE:
1791 		return "HIF_RX_NBUF_MAP_FAILURE";
1792 	case HIF_RX_NBUF_ENQUEUE_FAILURE:
1793 		return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1794 	default:
1795 		return "invalid";
1796 	}
1797 }
1798 
1799 /**
1800  * hif_dump_desc_event() - record ce descriptor events
1801  * @scn: HIF context
1802  * @buf: Buffer to which to be copied
1803  */
hif_dump_desc_event(struct hif_softc * scn,char * buf)1804 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1805 {
1806 	struct hif_ce_desc_event *event;
1807 	uint64_t secs, usecs;
1808 	ssize_t len = 0;
1809 	struct ce_desc_hist *ce_hist = NULL;
1810 	struct hif_ce_desc_event *hist_ev = NULL;
1811 
1812 	if (!scn)
1813 		return -EINVAL;
1814 
1815 	ce_hist = &scn->hif_ce_desc_hist;
1816 
1817 	if (ce_hist->hist_id >= CE_COUNT_MAX ||
1818 	    ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1819 		qdf_print("Invalid values");
1820 		return -EINVAL;
1821 	}
1822 
1823 	hist_ev =
1824 		(struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1825 
1826 	if (!hist_ev) {
1827 		qdf_print("Low Memory");
1828 		return -EINVAL;
1829 	}
1830 
1831 	event = &hist_ev[ce_hist->hist_index];
1832 
1833 	qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1834 
1835 	len += snprintf(buf, PAGE_SIZE - len,
1836 			"\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1837 			secs, usecs, ce_hist->hist_id,
1838 			ce_event_type_to_str(event->type),
1839 			event->index, event->memory);
1840 #ifdef HIF_CE_DEBUG_DATA_BUF
1841 	len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
1842 			event->actual_data_len);
1843 #endif
1844 
1845 	len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1846 
1847 	hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1848 				16, 1, buf + len,
1849 				(ssize_t)PAGE_SIZE - len, false);
1850 	len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1851 	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1852 
1853 #ifdef HIF_CE_DEBUG_DATA_BUF
1854 	if (ce_hist->data_enable[ce_hist->hist_id])
1855 		len = hif_dump_desc_data_buf(buf, len, event->data,
1856 						(event->actual_data_len <
1857 						 CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1858 						event->actual_data_len :
1859 						CE_DEBUG_MAX_DATA_BUF_SIZE);
1860 #endif /*HIF_CE_DEBUG_DATA_BUF*/
1861 
1862 	len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1863 
1864 	return len;
1865 }
1866 
1867 /*
1868  * hif_store_desc_trace_buf_index() -
1869  * API to get the CE id and CE debug storage buffer index
1870  *
1871  * @dev: network device
1872  * @attr: sysfs attribute
1873  * @buf: data got from the user
1874  *
1875  * Return total length
1876  */
hif_input_desc_trace_buf_index(struct hif_softc * scn,const char * buf,size_t size)1877 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1878 					const char *buf, size_t size)
1879 {
1880 	struct ce_desc_hist *ce_hist = NULL;
1881 
1882 	if (!scn)
1883 		return -EINVAL;
1884 
1885 	ce_hist = &scn->hif_ce_desc_hist;
1886 
1887 	if (!size) {
1888 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1889 		return -EINVAL;
1890 	}
1891 
1892 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1893 		   (unsigned int *)&ce_hist->hist_index) != 2) {
1894 		qdf_nofl_err("%s: Invalid input value.", __func__);
1895 		return -EINVAL;
1896 	}
1897 	if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1898 	   (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1899 		qdf_print("Invalid values");
1900 		return -EINVAL;
1901 	}
1902 
1903 	return size;
1904 }
1905 
1906 #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1907 
1908 #ifdef HIF_CE_DEBUG_DATA_BUF
1909 /*
1910  * hif_ce_en_desc_hist() -
1911  * API to enable recording the CE desc history
1912  *
1913  * @dev: network device
1914  * @attr: sysfs attribute
1915  * @buf: buffer to copy the data.
1916  *
1917  * Starts recording the ce desc history
1918  *
1919  * Return total length copied
1920  */
hif_ce_en_desc_hist(struct hif_softc * scn,const char * buf,size_t size)1921 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1922 {
1923 	struct ce_desc_hist *ce_hist = NULL;
1924 	uint32_t cfg = 0;
1925 	uint32_t ce_id = 0;
1926 
1927 	if (!scn)
1928 		return -EINVAL;
1929 
1930 	ce_hist = &scn->hif_ce_desc_hist;
1931 
1932 	if (!size) {
1933 		qdf_nofl_err("%s: Invalid input buffer.", __func__);
1934 		return -EINVAL;
1935 	}
1936 
1937 	if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1938 		   (unsigned int *)&cfg) != 2) {
1939 		qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
1940 			     __func__);
1941 		return -EINVAL;
1942 	}
1943 	if (ce_id >= CE_COUNT_MAX) {
1944 		qdf_print("Invalid value CE Id");
1945 		return -EINVAL;
1946 	}
1947 
1948 	if ((cfg > 1 || cfg < 0)) {
1949 		qdf_print("Invalid values: enter 0 or 1");
1950 		return -EINVAL;
1951 	}
1952 
1953 	if (!ce_hist->hist_ev[ce_id])
1954 		return -EINVAL;
1955 
1956 	qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1957 	if (cfg == 1) {
1958 		if (ce_hist->data_enable[ce_id] == 1) {
1959 			qdf_debug("Already Enabled");
1960 		} else {
1961 			if (alloc_mem_ce_debug_hist_data(scn, ce_id)
1962 							== QDF_STATUS_E_NOMEM){
1963 				ce_hist->data_enable[ce_id] = 0;
1964 				qdf_err("%s:Memory Alloc failed", __func__);
1965 			} else
1966 				ce_hist->data_enable[ce_id] = 1;
1967 		}
1968 	} else if (cfg == 0) {
1969 		if (ce_hist->data_enable[ce_id] == 0) {
1970 			qdf_debug("Already Disabled");
1971 		} else {
1972 			ce_hist->data_enable[ce_id] = 0;
1973 				free_mem_ce_debug_hist_data(scn, ce_id);
1974 		}
1975 	}
1976 	qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1977 
1978 	return size;
1979 }
1980 
1981 /*
1982  * hif_disp_ce_enable_desc_data_hist() -
1983  * API to display value of data_enable
1984  *
1985  * @dev: network device
1986  * @attr: sysfs attribute
1987  * @buf: buffer to copy the data.
1988  *
1989  * Return total length copied
1990  */
hif_disp_ce_enable_desc_data_hist(struct hif_softc * scn,char * buf)1991 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1992 {
1993 	ssize_t len = 0;
1994 	uint32_t ce_id = 0;
1995 	struct ce_desc_hist *ce_hist = NULL;
1996 
1997 	if (!scn)
1998 		return -EINVAL;
1999 
2000 	ce_hist = &scn->hif_ce_desc_hist;
2001 
2002 	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
2003 		len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
2004 				ce_id, ce_hist->data_enable[ce_id]);
2005 	}
2006 
2007 	return len;
2008 }
2009 #endif /* HIF_CE_DEBUG_DATA_BUF */
2010 
2011 #ifdef OL_ATH_SMART_LOGGING
2012 #define GUARD_SPACE 10
2013 #define LOG_ID_SZ 4
2014 /*
2015  * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
2016  * @src_ring: SRC ring state
2017  * @buf_cur: Current pointer in ring buffer
2018  * @buf_init:Start of the ring buffer
2019  * @buf_sz: Size of the ring buffer
2020  * @skb_sz: Max size of the SKB buffer to be copied
2021  *
2022  * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
2023  * the given buf, skb_sz is the max buffer size to be copied
2024  *
2025  * Return: Current pointer in ring buffer
2026  */
hif_log_src_ce_dump(struct CE_ring_state * src_ring,uint8_t * buf_cur,uint8_t * buf_init,uint32_t buf_sz,uint32_t skb_sz)2027 static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
2028 				    uint8_t *buf_cur, uint8_t *buf_init,
2029 				    uint32_t buf_sz, uint32_t skb_sz)
2030 {
2031 	struct CE_src_desc *src_ring_base;
2032 	uint32_t len, entry;
2033 	struct CE_src_desc  *src_desc;
2034 	qdf_nbuf_t nbuf;
2035 	uint32_t available_buf;
2036 
2037 	src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
2038 	len = sizeof(struct CE_ring_state);
2039 	available_buf = buf_sz - (buf_cur - buf_init);
2040 	if (available_buf < (len + GUARD_SPACE)) {
2041 		buf_cur = buf_init;
2042 	}
2043 
2044 	qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
2045 	buf_cur += sizeof(struct CE_ring_state);
2046 
2047 	for (entry = 0; entry < src_ring->nentries; entry++) {
2048 		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
2049 		nbuf = src_ring->per_transfer_context[entry];
2050 		if (nbuf) {
2051 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
2052 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2053 
2054 			len = sizeof(struct CE_src_desc) + skb_cp_len
2055 				+ LOG_ID_SZ + sizeof(skb_cp_len);
2056 			available_buf = buf_sz - (buf_cur - buf_init);
2057 			if (available_buf < (len + GUARD_SPACE)) {
2058 				buf_cur = buf_init;
2059 			}
2060 			qdf_mem_copy(buf_cur, src_desc,
2061 				     sizeof(struct CE_src_desc));
2062 			buf_cur += sizeof(struct CE_src_desc);
2063 
2064 			available_buf = buf_sz - (buf_cur - buf_init);
2065 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
2066 						skb_cp_len);
2067 
2068 			if (skb_cp_len) {
2069 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2070 					     skb_cp_len);
2071 				buf_cur += skb_cp_len;
2072 			}
2073 		} else {
2074 			len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
2075 			available_buf = buf_sz - (buf_cur - buf_init);
2076 			if (available_buf < (len + GUARD_SPACE)) {
2077 				buf_cur = buf_init;
2078 			}
2079 			qdf_mem_copy(buf_cur, src_desc,
2080 				     sizeof(struct CE_src_desc));
2081 			buf_cur += sizeof(struct CE_src_desc);
2082 			available_buf = buf_sz - (buf_cur - buf_init);
2083 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
2084 		}
2085 	}
2086 
2087 	return buf_cur;
2088 }
2089 
2090 /*
2091  * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
2092  * @dest_ring: SRC ring state
2093  * @buf_cur: Current pointer in ring buffer
2094  * @buf_init:Start of the ring buffer
2095  * @buf_sz: Size of the ring buffer
2096  * @skb_sz: Max size of the SKB buffer to be copied
2097  *
2098  * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
2099  * the given buf, skb_sz is the max buffer size to be copied
2100  *
2101  * Return: Current pointer in ring buffer
2102  */
hif_log_dest_ce_dump(struct CE_ring_state * dest_ring,uint8_t * buf_cur,uint8_t * buf_init,uint32_t buf_sz,uint32_t skb_sz)2103 static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
2104 				     uint8_t *buf_cur, uint8_t *buf_init,
2105 				     uint32_t buf_sz, uint32_t skb_sz)
2106 {
2107 	struct CE_dest_desc *dest_ring_base;
2108 	uint32_t len, entry;
2109 	struct CE_dest_desc  *dest_desc;
2110 	qdf_nbuf_t nbuf;
2111 	uint32_t available_buf;
2112 
2113 	dest_ring_base =
2114 		(struct CE_dest_desc *)dest_ring->base_addr_owner_space;
2115 
2116 	len = sizeof(struct CE_ring_state);
2117 	available_buf = buf_sz - (buf_cur - buf_init);
2118 	if (available_buf < (len + GUARD_SPACE)) {
2119 		buf_cur = buf_init;
2120 	}
2121 
2122 	qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
2123 	buf_cur += sizeof(struct CE_ring_state);
2124 
2125 	for (entry = 0; entry < dest_ring->nentries; entry++) {
2126 		dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
2127 
2128 		nbuf = dest_ring->per_transfer_context[entry];
2129 		if (nbuf) {
2130 			uint32_t skb_len  = qdf_nbuf_len(nbuf);
2131 			uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2132 
2133 			len = sizeof(struct CE_dest_desc) + skb_cp_len
2134 				+ LOG_ID_SZ + sizeof(skb_cp_len);
2135 
2136 			available_buf = buf_sz - (buf_cur - buf_init);
2137 			if (available_buf < (len + GUARD_SPACE)) {
2138 				buf_cur = buf_init;
2139 			}
2140 
2141 			qdf_mem_copy(buf_cur, dest_desc,
2142 				     sizeof(struct CE_dest_desc));
2143 			buf_cur += sizeof(struct CE_dest_desc);
2144 			available_buf = buf_sz - (buf_cur - buf_init);
2145 			buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
2146 						skb_cp_len);
2147 			if (skb_cp_len) {
2148 				qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2149 					     skb_cp_len);
2150 				buf_cur += skb_cp_len;
2151 			}
2152 		} else {
2153 			len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
2154 			available_buf = buf_sz - (buf_cur - buf_init);
2155 			if (available_buf < (len + GUARD_SPACE)) {
2156 				buf_cur = buf_init;
2157 			}
2158 			qdf_mem_copy(buf_cur, dest_desc,
2159 				     sizeof(struct CE_dest_desc));
2160 			buf_cur += sizeof(struct CE_dest_desc);
2161 			available_buf = buf_sz - (buf_cur - buf_init);
2162 			buf_cur += snprintf(buf_cur, available_buf, "NUL");
2163 		}
2164 	}
2165 	return buf_cur;
2166 }
2167 
2168 /**
2169  * hif_log_dump_ce() - Copy all the CE DEST ring to buf
2170  * @scn:
2171  * @buf_cur:
2172  * @buf_init:
2173  * @buf_sz:
2174  * @ce:
2175  * @skb_sz:
2176  *
2177  * Calls the respective function to dump all the CE SRC/DEST ring descriptors
2178  * and buffers pointed by them in to the given buf
2179  */
hif_log_dump_ce(struct hif_softc * scn,uint8_t * buf_cur,uint8_t * buf_init,uint32_t buf_sz,uint32_t ce,uint32_t skb_sz)2180 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2181 			 uint8_t *buf_init, uint32_t buf_sz,
2182 			 uint32_t ce, uint32_t skb_sz)
2183 {
2184 	struct CE_state *ce_state;
2185 	struct CE_ring_state *src_ring;
2186 	struct CE_ring_state *dest_ring;
2187 
2188 	ce_state = scn->ce_id_to_state[ce];
2189 	src_ring = ce_state->src_ring;
2190 	dest_ring = ce_state->dest_ring;
2191 
2192 	if (src_ring) {
2193 		buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
2194 					      buf_init, buf_sz, skb_sz);
2195 	} else if (dest_ring) {
2196 		buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
2197 					       buf_init, buf_sz, skb_sz);
2198 	}
2199 
2200 	return buf_cur;
2201 }
2202 
2203 qdf_export_symbol(hif_log_dump_ce);
2204 #endif /* OL_ATH_SMART_LOGGING */
2205 
2206