1*5113495bSYour Name /*
2*5113495bSYour Name * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name *
5*5113495bSYour Name * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name * above copyright notice and this permission notice appear in all
8*5113495bSYour Name * copies.
9*5113495bSYour Name *
10*5113495bSYour Name * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name */
19*5113495bSYour Name
20*5113495bSYour Name #include "hif.h"
21*5113495bSYour Name #include "hif_io32.h"
22*5113495bSYour Name #include "ce_api.h"
23*5113495bSYour Name #include "ce_main.h"
24*5113495bSYour Name #include "ce_internal.h"
25*5113495bSYour Name #include "ce_reg.h"
26*5113495bSYour Name #include "qdf_lock.h"
27*5113495bSYour Name #include "regtable.h"
28*5113495bSYour Name #include "hif_main.h"
29*5113495bSYour Name #include "hif_debug.h"
30*5113495bSYour Name #include "hif_napi.h"
31*5113495bSYour Name #include "qdf_module.h"
32*5113495bSYour Name #include <qdf_tracepoint.h>
33*5113495bSYour Name
#ifdef IPA_OFFLOAD
#ifdef QCA_WIFI_3_0
/*
 * CE_IPA_RING_INIT() - preset an IPA copy-engine source descriptor with
 * fixed defaults for QCA_WIFI_3_0 targets (extended descriptor layout).
 * ce_desc must be a pointer expression; it is evaluated multiple times.
 * NOTE(review): the meaning of the constants (packet_result_offset = 64,
 * nbytes = 128, type = 2) is target-defined — confirm against the CE HW spec.
 */
#define CE_IPA_RING_INIT(ce_desc) \
do { \
ce_desc->gather = 0; \
ce_desc->enable_11h = 0; \
ce_desc->meta_data_low = 0; \
ce_desc->packet_result_offset = 64; \
ce_desc->toeplitz_hash_enable = 0; \
ce_desc->addr_y_search_disable = 0; \
ce_desc->addr_x_search_disable = 0; \
ce_desc->misc_int_disable = 0; \
ce_desc->target_int_disable = 0; \
ce_desc->host_int_disable = 0; \
ce_desc->dest_byte_swap = 0; \
ce_desc->byte_swap = 0; \
ce_desc->type = 2; \
ce_desc->tx_classify = 1; \
ce_desc->buffer_addr_hi = 0; \
ce_desc->meta_data = 0; \
ce_desc->nbytes = 128; \
} while (0)
#else
/*
 * CE_IPA_RING_INIT() - preset an IPA copy-engine source descriptor for
 * pre-3.0 targets (legacy descriptor layout, fewer fields).
 */
#define CE_IPA_RING_INIT(ce_desc) \
do { \
ce_desc->byte_swap = 0; \
ce_desc->nbytes = 60; \
ce_desc->gather = 0; \
} while (0)
#endif /* QCA_WIFI_3_0 */
#endif /* IPA_OFFLOAD */
65*5113495bSYour Name
/* When clear, writes to CDC_WAR_DATA_CE take the magic-value fast path in
 * war_ce_src_ring_write_idx_set(); exact sleep semantics not visible here —
 * TODO confirm
 */
static int war1_allow_sleep;
/* io32 write workaround: gates the indicator-register sequence in
 * war_ce_src_ring_write_idx_set(); toggled via hif_ce_war_enable/disable
 */
static int hif_ce_war1;
69*5113495bSYour Name
/**
 * hif_ce_war_disable() - disable ce war globally
 */
void hif_ce_war_disable(void)
{
	hif_ce_war1 = 0;
}
77*5113495bSYour Name
/**
 * hif_ce_war_enable() - enable ce war globally
 */
void hif_ce_war_enable(void)
{
	hif_ce_war1 = 1;
}
85*5113495bSYour Name
86*5113495bSYour Name /*
87*5113495bSYour Name * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
88*5113495bSYour Name * for defined here
89*5113495bSYour Name */
90*5113495bSYour Name #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
91*5113495bSYour Name
/* Output buffer size for x data bytes; presumably 3 chars per byte (two hex
 * digits plus separator) minus the final separator — confirm against the
 * dump routine that uses it.
 */
#define CE_DEBUG_PRINT_BUF_SIZE(x) (((x) * 3) - 1)
/* Number of payload bytes rendered per output row */
#define CE_DEBUG_DATA_PER_ROW 16

static const char *ce_event_type_to_str(enum hif_ce_event_type type);
96*5113495bSYour Name
/**
 * get_next_record_index() - atomically reserve the next history slot
 * @table_index: shared atomic write cursor for the history array
 * @array_size: number of slots in the history array
 *
 * Lock-free slot reservation: each caller takes a unique raw index and
 * folds it into range locally, so concurrent recorders never collide.
 *
 * Return: index in [0, array_size) at which the caller may record
 */
int get_next_record_index(qdf_atomic_t *table_index, int array_size)
{
	int idx = qdf_atomic_inc_return(table_index);

	/* Pull the shared cursor back by one full lap once it wraps */
	if (idx == array_size)
		qdf_atomic_sub(array_size, table_index);

	/* Fold the private copy into range; racing callers may be laps ahead */
	while (idx >= array_size)
		idx -= array_size;

	return idx;
}

qdf_export_symbol(get_next_record_index);
111*5113495bSYour Name
112*5113495bSYour Name #ifdef HIF_CE_DEBUG_DATA_BUF
hif_ce_desc_data_record(struct hif_ce_desc_event * event,int len)113*5113495bSYour Name void hif_ce_desc_data_record(struct hif_ce_desc_event *event, int len)
114*5113495bSYour Name {
115*5113495bSYour Name uint8_t *data = NULL;
116*5113495bSYour Name
117*5113495bSYour Name if (!event->data) {
118*5113495bSYour Name hif_err_rl("No ce debug memory allocated");
119*5113495bSYour Name return;
120*5113495bSYour Name }
121*5113495bSYour Name
122*5113495bSYour Name if (event->memory && len > 0)
123*5113495bSYour Name data = qdf_nbuf_data((qdf_nbuf_t)event->memory);
124*5113495bSYour Name
125*5113495bSYour Name event->actual_data_len = 0;
126*5113495bSYour Name qdf_mem_zero(event->data, CE_DEBUG_MAX_DATA_BUF_SIZE);
127*5113495bSYour Name
128*5113495bSYour Name if (data && len > 0) {
129*5113495bSYour Name qdf_mem_copy(event->data, data,
130*5113495bSYour Name ((len < CE_DEBUG_MAX_DATA_BUF_SIZE) ?
131*5113495bSYour Name len : CE_DEBUG_MAX_DATA_BUF_SIZE));
132*5113495bSYour Name event->actual_data_len = len;
133*5113495bSYour Name }
134*5113495bSYour Name }
135*5113495bSYour Name
136*5113495bSYour Name qdf_export_symbol(hif_ce_desc_data_record);
137*5113495bSYour Name
/**
 * hif_clear_ce_desc_debug_data() - reset a history entry for reuse
 * @event: the history entry to clear
 *
 * Zeroes every field up to (but excluding) the trailing debug-data
 * members, so the attached data buffer pointer survives across records.
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
	qdf_mem_zero(event,
		     offsetof(struct hif_ce_desc_event, data));
}

qdf_export_symbol(hif_clear_ce_desc_debug_data);
145*5113495bSYour Name #else
/**
 * hif_clear_ce_desc_debug_data() - reset a history entry for reuse
 * @event: the history entry to clear
 *
 * Without HIF_CE_DEBUG_DATA_BUF there is no attached data buffer, so the
 * entire structure can simply be zeroed.
 */
void hif_clear_ce_desc_debug_data(struct hif_ce_desc_event *event)
{
	qdf_mem_zero(event, sizeof(struct hif_ce_desc_event));
}

qdf_export_symbol(hif_clear_ce_desc_debug_data);
152*5113495bSYour Name #endif /* HIF_CE_DEBUG_DATA_BUF */
153*5113495bSYour Name
154*5113495bSYour Name #if defined(HIF_RECORD_PADDR)
/**
 * hif_ce_desc_record_rx_paddr() - record address views of an RX buffer
 * @scn: HIF context used for the dma-to-physical translation
 * @event: history entry being filled in
 * @memory: network buffer attached to the descriptor; may be NULL
 *
 * Stores the buffer's DMA address together with the physical address as
 * derived both from the DMA mapping and from the kernel virtual address,
 * so mismatches can be spotted in the descriptor history.
 */
void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
				 struct hif_ce_desc_event *event,
				 qdf_nbuf_t memory)
{
	if (!memory)
		return;

	event->dma_addr = QDF_NBUF_CB_PADDR(memory);
	event->dma_to_phy = qdf_mem_paddr_from_dmaaddr(scn->qdf_dev,
						       event->dma_addr);
	event->virt_to_phy = virt_to_phys(qdf_nbuf_data(memory));
}
#endif /* HIF_RECORD_PADDR */
170*5113495bSYour Name
/**
 * hif_display_latest_desc_hist() - dump the latest-events history
 * @hif_ctx: opaque HIF context
 *
 * For each CE that has latest-event tracking enabled, prints the most
 * recent irq/tasklet/work timestamps plus head/tail pointer snapshots.
 */
void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct ce_desc_hist *ce_hist;
	struct latest_evt_history *evt;
	int i, j;

	if (!scn)
		return;

	ce_hist = &scn->hif_ce_desc_hist;

	/* Row i of latest_evts[] maps to CE id (i + HIF_CE_MAX_LATEST_HIST) */
	for (i = 0; i < HIF_CE_MAX_LATEST_HIST; i++) {
		if (!ce_hist->enable[i + HIF_CE_MAX_LATEST_HIST])
			continue;

		for (j = 0; j < HIF_CE_MAX_LATEST_EVTS; j++) {
			evt = &ce_hist->latest_evts[i][j];
			hif_info_high("CE_id:%d event_idx:%d cpu_id:%d irq_entry:0x%llx tasklet_entry:0x%llx tasklet_resched:0x%llx tasklet_exit:0x%llx ce_work:0x%llx hp:%x tp:%x",
				      (i + HIF_CE_MAX_LATEST_HIST), j, evt->cpu_id,
				      evt->irq_entry_ts, evt->bh_entry_ts,
				      evt->bh_resched_ts, evt->bh_exit_ts,
				      evt->bh_work_ts, evt->ring_hp, evt->ring_tp);
		}
	}
}
197*5113495bSYour Name
/**
 * hif_record_latest_evt() - update the two-slot latest-event history
 * @ce_hist: descriptor history owning the latest_evts table
 * @type: hif_ce_event_type being recorded
 * @ce_id: copy engine id; only CE 2 and CE 3 are tracked here
 * @time: timestamp of the event
 * @hp: ring head pointer snapshot (work/completion events only)
 * @tp: ring tail pointer snapshot (work/completion events only)
 *
 * Each tracked CE keeps two entries per event class; the slot holding
 * the older timestamp is overwritten, so the last two occurrences of
 * each event class are always retained.
 */
void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
			   uint8_t type,
			   int ce_id, uint64_t time,
			   uint32_t hp, uint32_t tp)
{
	struct latest_evt_history *latest_evts;
	int idx = 0;

	if (ce_id != 2 && ce_id != 3)
		return;

	/*
	 * NOTE(review): rows are addressed as ce_id - HIF_CE_MAX_LATEST_HIST,
	 * which presumably maps CE 2/3 onto rows 0/1 — confirm that
	 * HIF_CE_MAX_LATEST_HIST == 2. idx is 0 here, so latest_evts points
	 * at the first of the row's two slots.
	 */
	latest_evts = &ce_hist->latest_evts[ce_id - HIF_CE_MAX_LATEST_HIST][idx];

	switch (type) {
	case HIF_IRQ_EVENT:
		/* Slot 0 newer than slot 1 => overwrite slot 1 (the older) */
		if (latest_evts[idx].irq_entry_ts >
		    latest_evts[idx + 1].irq_entry_ts)
			idx = 1;
		latest_evts[idx].irq_entry_ts = time;
		latest_evts[idx].cpu_id = qdf_get_cpu();
		break;
	case HIF_CE_TASKLET_ENTRY:
		if (latest_evts[idx].bh_entry_ts >
		    latest_evts[idx + 1].bh_entry_ts)
			idx = 1;
		latest_evts[idx].bh_entry_ts = time;
		break;
	case HIF_CE_TASKLET_RESCHEDULE:
		if (latest_evts[idx].bh_resched_ts >
		    latest_evts[idx + 1].bh_resched_ts)
			idx = 1;
		latest_evts[idx].bh_resched_ts = time;
		break;
	case HIF_CE_TASKLET_EXIT:
		if (latest_evts[idx].bh_exit_ts >
		    latest_evts[idx + 1].bh_exit_ts)
			idx = 1;
		latest_evts[idx].bh_exit_ts = time;
		break;
	case HIF_TX_DESC_COMPLETION:
	case HIF_CE_DEST_STATUS_RING_REAP:
		if (latest_evts[idx].bh_work_ts >
		    latest_evts[idx + 1].bh_work_ts)
			idx = 1;
		latest_evts[idx].bh_work_ts = time;
		latest_evts[idx].ring_hp = hp;
		latest_evts[idx].ring_tp = tp;
		break;
	default:
		break;
	}
}
250*5113495bSYour Name
251*5113495bSYour Name /**
252*5113495bSYour Name * hif_record_ce_desc_event() - record ce descriptor events
253*5113495bSYour Name * @scn: hif_softc
254*5113495bSYour Name * @ce_id: which ce is the event occurring on
255*5113495bSYour Name * @type: what happened
256*5113495bSYour Name * @descriptor: pointer to the descriptor posted/completed
257*5113495bSYour Name * @memory: virtual address of buffer related to the descriptor
258*5113495bSYour Name * @index: index that the descriptor was/will be at.
259*5113495bSYour Name * @len:
260*5113495bSYour Name */
hif_record_ce_desc_event(struct hif_softc * scn,int ce_id,enum hif_ce_event_type type,union ce_desc * descriptor,void * memory,int index,int len)261*5113495bSYour Name void hif_record_ce_desc_event(struct hif_softc *scn, int ce_id,
262*5113495bSYour Name enum hif_ce_event_type type,
263*5113495bSYour Name union ce_desc *descriptor,
264*5113495bSYour Name void *memory, int index,
265*5113495bSYour Name int len)
266*5113495bSYour Name {
267*5113495bSYour Name int record_index;
268*5113495bSYour Name struct hif_ce_desc_event *event;
269*5113495bSYour Name
270*5113495bSYour Name struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
271*5113495bSYour Name struct hif_ce_desc_event *hist_ev = NULL;
272*5113495bSYour Name
273*5113495bSYour Name if (ce_id < CE_COUNT_MAX)
274*5113495bSYour Name hist_ev = (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_id];
275*5113495bSYour Name else
276*5113495bSYour Name return;
277*5113495bSYour Name
278*5113495bSYour Name if (ce_id >= CE_COUNT_MAX)
279*5113495bSYour Name return;
280*5113495bSYour Name
281*5113495bSYour Name if (!ce_hist->enable[ce_id])
282*5113495bSYour Name return;
283*5113495bSYour Name
284*5113495bSYour Name if (!hist_ev)
285*5113495bSYour Name return;
286*5113495bSYour Name
287*5113495bSYour Name record_index = get_next_record_index(
288*5113495bSYour Name &ce_hist->history_index[ce_id], HIF_CE_HISTORY_MAX);
289*5113495bSYour Name
290*5113495bSYour Name event = &hist_ev[record_index];
291*5113495bSYour Name
292*5113495bSYour Name hif_clear_ce_desc_debug_data(event);
293*5113495bSYour Name
294*5113495bSYour Name event->type = type;
295*5113495bSYour Name event->time = qdf_get_log_timestamp();
296*5113495bSYour Name event->cpu_id = qdf_get_cpu();
297*5113495bSYour Name
298*5113495bSYour Name if (descriptor)
299*5113495bSYour Name qdf_mem_copy(&event->descriptor, descriptor,
300*5113495bSYour Name sizeof(union ce_desc));
301*5113495bSYour Name
302*5113495bSYour Name event->memory = memory;
303*5113495bSYour Name event->index = index;
304*5113495bSYour Name
305*5113495bSYour Name if (event->type == HIF_RX_DESC_POST ||
306*5113495bSYour Name event->type == HIF_RX_DESC_COMPLETION)
307*5113495bSYour Name hif_ce_desc_record_rx_paddr(scn, event, memory);
308*5113495bSYour Name
309*5113495bSYour Name if (ce_hist->data_enable[ce_id])
310*5113495bSYour Name hif_ce_desc_data_record(event, len);
311*5113495bSYour Name
312*5113495bSYour Name hif_record_latest_evt(ce_hist, type, ce_id, event->time, 0, 0);
313*5113495bSYour Name }
314*5113495bSYour Name qdf_export_symbol(hif_record_ce_desc_event);
315*5113495bSYour Name
/**
 * ce_init_ce_desc_event_log() - initialize the ce event log
 * @scn: HIF context
 * @ce_id: copy engine id for which we are initializing the log
 * @size: size of array to dedicate
 *
 * Currently the passed size is ignored in favor of a precompiled value.
 */
void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id, int size)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;
	/* Reset the write cursor; create the lock guarding debug data mem */
	qdf_atomic_init(&ce_hist->history_index[ce_id]);
	qdf_mutex_create(&ce_hist->ce_dbg_datamem_lock[ce_id]);
}
330*5113495bSYour Name
/**
 * ce_deinit_ce_desc_event_log() - deinitialize the ce event log
 * @scn: HIF context
 * @ce_id: copy engine id for which we are deinitializing the log
 *
 * Destroys the lock created by ce_init_ce_desc_event_log().
 */
inline void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
	struct ce_desc_hist *ce_hist = &scn->hif_ce_desc_hist;

	qdf_mutex_destroy(&ce_hist->ce_dbg_datamem_lock[ce_id]);
}
343*5113495bSYour Name
344*5113495bSYour Name #else /* (HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
/* No-op stub: CE descriptor history is compiled out in this configuration */
void hif_record_ce_desc_event(struct hif_softc *scn,
			      int ce_id, enum hif_ce_event_type type,
			      union ce_desc *descriptor, void *memory,
			      int index, int len)
{
}
qdf_export_symbol(hif_record_ce_desc_event);
352*5113495bSYour Name
/* No-op stub: CE descriptor history is compiled out in this configuration */
inline void ce_init_ce_desc_event_log(struct hif_softc *scn, int ce_id,
				      int size)
{
}
357*5113495bSYour Name
/* No-op stub: CE descriptor history is compiled out in this configuration */
void ce_deinit_ce_desc_event_log(struct hif_softc *scn, int ce_id)
{
}
361*5113495bSYour Name #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
362*5113495bSYour Name
363*5113495bSYour Name #ifdef NAPI_YIELD_BUDGET_BASED
hif_ce_service_should_yield(struct hif_softc * scn,struct CE_state * ce_state)364*5113495bSYour Name bool hif_ce_service_should_yield(struct hif_softc *scn,
365*5113495bSYour Name struct CE_state *ce_state)
366*5113495bSYour Name {
367*5113495bSYour Name bool yield = hif_max_num_receives_reached(scn, ce_state->receive_count);
368*5113495bSYour Name
369*5113495bSYour Name /* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
370*5113495bSYour Name * beyond MAX_NUM_OF_RECEIVES for NAPI backet calculation issue. This
371*5113495bSYour Name * can happen in fast path handling as processing is happening in
372*5113495bSYour Name * batches.
373*5113495bSYour Name */
374*5113495bSYour Name if (yield)
375*5113495bSYour Name ce_state->receive_count = MAX_NUM_OF_RECEIVES;
376*5113495bSYour Name
377*5113495bSYour Name return yield;
378*5113495bSYour Name }
379*5113495bSYour Name #else
380*5113495bSYour Name /**
381*5113495bSYour Name * hif_ce_service_should_yield() - return true if the service is hogging the cpu
382*5113495bSYour Name * @scn: hif context
383*5113495bSYour Name * @ce_state: context of the copy engine being serviced
384*5113495bSYour Name *
385*5113495bSYour Name * Return: true if the service should yield
386*5113495bSYour Name */
hif_ce_service_should_yield(struct hif_softc * scn,struct CE_state * ce_state)387*5113495bSYour Name bool hif_ce_service_should_yield(struct hif_softc *scn,
388*5113495bSYour Name struct CE_state *ce_state)
389*5113495bSYour Name {
390*5113495bSYour Name bool yield, time_limit_reached, rxpkt_thresh_reached = 0;
391*5113495bSYour Name
392*5113495bSYour Name time_limit_reached = qdf_time_sched_clock() >
393*5113495bSYour Name ce_state->ce_service_yield_time ? 1 : 0;
394*5113495bSYour Name
395*5113495bSYour Name if (!time_limit_reached)
396*5113495bSYour Name rxpkt_thresh_reached = hif_max_num_receives_reached
397*5113495bSYour Name (scn, ce_state->receive_count);
398*5113495bSYour Name
399*5113495bSYour Name /* Setting receive_count to MAX_NUM_OF_RECEIVES when this count goes
400*5113495bSYour Name * beyond MAX_NUM_OF_RECEIVES for NAPI backet calculation issue. This
401*5113495bSYour Name * can happen in fast path handling as processing is happening in
402*5113495bSYour Name * batches.
403*5113495bSYour Name */
404*5113495bSYour Name if (rxpkt_thresh_reached)
405*5113495bSYour Name ce_state->receive_count = MAX_NUM_OF_RECEIVES;
406*5113495bSYour Name
407*5113495bSYour Name yield = time_limit_reached || rxpkt_thresh_reached;
408*5113495bSYour Name
409*5113495bSYour Name if (yield &&
410*5113495bSYour Name ce_state->htt_rx_data &&
411*5113495bSYour Name hif_napi_enabled(GET_HIF_OPAQUE_HDL(scn), ce_state->id)) {
412*5113495bSYour Name hif_napi_update_yield_stats(ce_state,
413*5113495bSYour Name time_limit_reached,
414*5113495bSYour Name rxpkt_thresh_reached);
415*5113495bSYour Name }
416*5113495bSYour Name
417*5113495bSYour Name return yield;
418*5113495bSYour Name }
419*5113495bSYour Name qdf_export_symbol(hif_ce_service_should_yield);
420*5113495bSYour Name #endif
421*5113495bSYour Name
/**
 * ce_flush_tx_ring_write_idx() - push a pending SRC ring write index to HW
 * @ce_tx_hdl: CE handle for the TX copy engine
 * @force_flush: when true, raise the flush event before testing it
 *
 * If a flush event is pending (or was just forced), the cached software
 * write index is written to the hardware register under the index lock
 * and the flush is timestamped.
 */
void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	struct hif_softc *scn = ce_state->scn;

	if (force_flush)
		ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT);

	if (!ce_ring_get_clear_event(src_ring, CE_RING_FLUSH_EVENT))
		return;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);
	CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
				  src_ring->write_index);
	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	src_ring->last_flush_ts = qdf_get_log_timestamp();
	hif_debug("flushed");
}
441*5113495bSYour Name
442*5113495bSYour Name /* Make sure this wrapper is called under ce_index_lock */
ce_tx_ring_write_idx_update_wrapper(struct CE_handle * ce_tx_hdl,int coalesce)443*5113495bSYour Name void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
444*5113495bSYour Name int coalesce)
445*5113495bSYour Name {
446*5113495bSYour Name struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
447*5113495bSYour Name struct CE_ring_state *src_ring = ce_state->src_ring;
448*5113495bSYour Name struct hif_softc *scn = ce_state->scn;
449*5113495bSYour Name
450*5113495bSYour Name if (!coalesce)
451*5113495bSYour Name CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
452*5113495bSYour Name src_ring->write_index);
453*5113495bSYour Name }
454*5113495bSYour Name
455*5113495bSYour Name /*
456*5113495bSYour Name * Guts of ce_send, used by both ce_send and ce_sendlist_send.
457*5113495bSYour Name * The caller takes responsibility for any needed locking.
458*5113495bSYour Name */
459*5113495bSYour Name
/**
 * war_ce_src_ring_write_idx_set() - write-index update with the io32 WAR
 * @scn: HIF context
 * @ctrl_addr: CE control register base offset for this copy engine
 * @write_index: new source ring write index
 *
 * When hif_ce_war1 is clear this is a plain write-index update. When set,
 * an indicator register (at DST_WATERMARK_ADDRESS within the CE's register
 * block) is used around the update; for CDC_WAR_DATA_CE with
 * war1_allow_sleep clear, the index is written fused with a magic value
 * instead. NOTE(review): the exact target-side handshake this implements
 * is not visible here — confirm against the CDC WAR documentation.
 */
void war_ce_src_ring_write_idx_set(struct hif_softc *scn,
				   u32 ctrl_addr, unsigned int write_index)
{
	if (hif_ce_war1) {
		void __iomem *indicator_addr;

		indicator_addr = scn->mem + ctrl_addr + DST_WATERMARK_ADDRESS;

		if (!war1_allow_sleep
		    && ctrl_addr == CE_BASE_ADDRESS(CDC_WAR_DATA_CE)) {
			hif_write32_mb(scn, indicator_addr,
				       (CDC_WAR_MAGIC_STR | write_index));
		} else {
			unsigned long irq_flags;

			/* Keep the indicator sequence atomic on this CPU */
			local_irq_save(irq_flags);
			hif_write32_mb(scn, indicator_addr, 1);

			/*
			 * PCIE write waits for ACK in IPQ8K, there is no
			 * need to read back value.
			 */
			(void)hif_read32_mb(scn, indicator_addr);
			/* conservative */
			(void)hif_read32_mb(scn, indicator_addr);

			CE_SRC_RING_WRITE_IDX_SET(scn,
						  ctrl_addr, write_index);

			hif_write32_mb(scn, indicator_addr, 0);
			local_irq_restore(irq_flags);
		}
	} else {
		CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index);
	}
}

qdf_export_symbol(war_ce_src_ring_write_idx_set);
498*5113495bSYour Name
499*5113495bSYour Name QDF_STATUS
ce_send(struct CE_handle * copyeng,void * per_transfer_context,qdf_dma_addr_t buffer,uint32_t nbytes,uint32_t transfer_id,uint32_t flags,uint32_t user_flag)500*5113495bSYour Name ce_send(struct CE_handle *copyeng,
501*5113495bSYour Name void *per_transfer_context,
502*5113495bSYour Name qdf_dma_addr_t buffer,
503*5113495bSYour Name uint32_t nbytes,
504*5113495bSYour Name uint32_t transfer_id,
505*5113495bSYour Name uint32_t flags,
506*5113495bSYour Name uint32_t user_flag)
507*5113495bSYour Name {
508*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
509*5113495bSYour Name QDF_STATUS status;
510*5113495bSYour Name struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
511*5113495bSYour Name
512*5113495bSYour Name qdf_spin_lock_bh(&CE_state->ce_index_lock);
513*5113495bSYour Name status = hif_state->ce_services->ce_send_nolock(copyeng,
514*5113495bSYour Name per_transfer_context, buffer, nbytes,
515*5113495bSYour Name transfer_id, flags, user_flag);
516*5113495bSYour Name qdf_spin_unlock_bh(&CE_state->ce_index_lock);
517*5113495bSYour Name
518*5113495bSYour Name return status;
519*5113495bSYour Name }
520*5113495bSYour Name qdf_export_symbol(ce_send);
521*5113495bSYour Name
/**
 * ce_sendlist_sizeof() - size of the opaque sendlist structure
 *
 * Return: number of bytes callers must reserve for a struct ce_sendlist
 */
unsigned int ce_sendlist_sizeof(void)
{
	return sizeof(struct ce_sendlist);
}
526*5113495bSYour Name
ce_sendlist_init(struct ce_sendlist * sendlist)527*5113495bSYour Name void ce_sendlist_init(struct ce_sendlist *sendlist)
528*5113495bSYour Name {
529*5113495bSYour Name struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
530*5113495bSYour Name
531*5113495bSYour Name sl->num_items = 0;
532*5113495bSYour Name }
533*5113495bSYour Name
534*5113495bSYour Name QDF_STATUS
ce_sendlist_buf_add(struct ce_sendlist * sendlist,qdf_dma_addr_t buffer,uint32_t nbytes,uint32_t flags,uint32_t user_flags)535*5113495bSYour Name ce_sendlist_buf_add(struct ce_sendlist *sendlist,
536*5113495bSYour Name qdf_dma_addr_t buffer,
537*5113495bSYour Name uint32_t nbytes,
538*5113495bSYour Name uint32_t flags,
539*5113495bSYour Name uint32_t user_flags)
540*5113495bSYour Name {
541*5113495bSYour Name struct ce_sendlist_s *sl = (struct ce_sendlist_s *)sendlist;
542*5113495bSYour Name unsigned int num_items = sl->num_items;
543*5113495bSYour Name struct ce_sendlist_item *item;
544*5113495bSYour Name
545*5113495bSYour Name if (num_items >= CE_SENDLIST_ITEMS_MAX) {
546*5113495bSYour Name QDF_ASSERT(num_items < CE_SENDLIST_ITEMS_MAX);
547*5113495bSYour Name return QDF_STATUS_E_RESOURCES;
548*5113495bSYour Name }
549*5113495bSYour Name
550*5113495bSYour Name item = &sl->item[num_items];
551*5113495bSYour Name item->send_type = CE_SIMPLE_BUFFER_TYPE;
552*5113495bSYour Name item->data = buffer;
553*5113495bSYour Name item->u.nbytes = nbytes;
554*5113495bSYour Name item->flags = flags;
555*5113495bSYour Name item->user_flags = user_flags;
556*5113495bSYour Name sl->num_items = num_items + 1;
557*5113495bSYour Name return QDF_STATUS_SUCCESS;
558*5113495bSYour Name }
559*5113495bSYour Name
560*5113495bSYour Name QDF_STATUS
ce_sendlist_send(struct CE_handle * copyeng,void * per_transfer_context,struct ce_sendlist * sendlist,unsigned int transfer_id)561*5113495bSYour Name ce_sendlist_send(struct CE_handle *copyeng,
562*5113495bSYour Name void *per_transfer_context,
563*5113495bSYour Name struct ce_sendlist *sendlist, unsigned int transfer_id)
564*5113495bSYour Name {
565*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
566*5113495bSYour Name struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
567*5113495bSYour Name
568*5113495bSYour Name return hif_state->ce_services->ce_sendlist_send(copyeng,
569*5113495bSYour Name per_transfer_context, sendlist, transfer_id);
570*5113495bSYour Name }
571*5113495bSYour Name
572*5113495bSYour Name #ifndef AH_NEED_TX_DATA_SWAP
573*5113495bSYour Name #define AH_NEED_TX_DATA_SWAP 0
574*5113495bSYour Name #endif
575*5113495bSYour Name
576*5113495bSYour Name /**
577*5113495bSYour Name * ce_batch_send() - sends bunch of msdus at once
578*5113495bSYour Name * @ce_tx_hdl : pointer to CE handle
579*5113495bSYour Name * @msdu : list of msdus to be sent
580*5113495bSYour Name * @transfer_id : transfer id
581*5113495bSYour Name * @len : Downloaded length
582*5113495bSYour Name * @sendhead : sendhead
583*5113495bSYour Name *
584*5113495bSYour Name * Assumption : Called with an array of MSDU's
585*5113495bSYour Name * Function:
586*5113495bSYour Name * For each msdu in the array
587*5113495bSYour Name * 1. Send each msdu
 * 2. Increment write index accordingly.
 *
 * Return: list of msdus not sent
591*5113495bSYour Name */
qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	u_int32_t ctrl_addr = ce_state->ctrl_addr;
	/* A_target_id_t targid = TARGID(scn);*/

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	/* Local scratch descriptor; word 1 is copied into the ring below */
	struct CE_src_desc lsrc_desc = {0};
	int deltacount = 0;
	qdf_nbuf_t freelist = NULL, hfreelist = NULL, tempnext;

	/* Refresh the cached sw (read) index before computing free space */
	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* Number of free source-ring slots between write and read index */
	deltacount = CE_RING_DELTA(nentries_mask, write_index, sw_index-1);

	while (msdu) {
		tempnext = qdf_nbuf_next(msdu);

		if (deltacount < 2) {
			/* Ring (nearly) full: if caller asked to send the
			 * head immediately, hand the unsent chain back;
			 * otherwise flush what was queued so far and move
			 * the current msdu onto the not-sent free list.
			 */
			if (sendhead)
				return msdu;
			hif_err("Out of descriptors");
			src_ring->write_index = write_index;
			war_ce_src_ring_write_idx_set(scn, ctrl_addr,
					write_index);

			/* Re-sample indices in case hw consumed entries */
			sw_index = src_ring->sw_index;
			write_index = src_ring->write_index;

			deltacount = CE_RING_DELTA(nentries_mask, write_index,
					sw_index-1);
			if (!freelist) {
				/* First dropped buffer: start the list */
				freelist = msdu;
				hfreelist = msdu;
			} else {
				/* Append to tail of the not-sent list */
				qdf_nbuf_set_next(freelist, msdu);
				freelist = msdu;
			}
			qdf_nbuf_set_next(msdu, NULL);
			msdu = tempnext;
			continue;
		}

		/* Fill the source descriptor at the current write index */
		src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base,
				write_index);

		/* Word 0: DMA address of the buffer */
		src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

		lsrc_desc.meta_data = transfer_id;
		/* Clamp the download length to the actual buffer length */
		if (len > msdu->len)
			len = msdu->len;
		lsrc_desc.nbytes = len;
		/* Data packet is a byte stream, so disable byte swap */
		lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
		lsrc_desc.gather = 0; /*For the last one, gather is not set*/

		/* Word 1: nbytes/meta_data/flags packed via lsrc_desc */
		src_desc[1] = ((uint32_t *)&lsrc_desc)[1];


		src_ring->per_transfer_context[write_index] = msdu;
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

		/* sendhead: only the head buffer is posted this call */
		if (sendhead)
			break;
		qdf_nbuf_set_next(msdu, NULL);
		msdu = tempnext;

	}


	/* Publish the new write index to hardware */
	src_ring->write_index = write_index;
	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return hfreelist;
}
678*5113495bSYour Name
679*5113495bSYour Name /**
680*5113495bSYour Name * ce_update_tx_ring() - Advance sw index.
681*5113495bSYour Name * @ce_tx_hdl : pointer to CE handle
682*5113495bSYour Name * @num_htt_cmpls : htt completions received.
683*5113495bSYour Name *
684*5113495bSYour Name * Function:
685*5113495bSYour Name * Increment the value of sw index of src ring
686*5113495bSYour Name * according to number of htt completions
687*5113495bSYour Name * received.
688*5113495bSYour Name *
689*5113495bSYour Name * Return: void
690*5113495bSYour Name */
#ifdef DATA_CE_SW_INDEX_NO_INLINE_UPDATE
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t nentries_mask = src_ring->nentries_mask;
	/*
	 * Advance the s/w index:
	 * This effectively simulates completing the CE ring descriptors
	 */
	src_ring->sw_index =
		CE_RING_IDX_ADD(nentries_mask, src_ring->sw_index,
				num_htt_cmpls);
}
#else
/* sw index is updated inline in the data path; keep an empty stub so
 * callers can invoke ce_update_tx_ring() unconditionally.
 */
void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls)
{}
#endif
709*5113495bSYour Name
710*5113495bSYour Name /**
711*5113495bSYour Name * ce_send_single() - sends
712*5113495bSYour Name * @ce_tx_hdl : pointer to CE handle
713*5113495bSYour Name * @msdu : msdu to be sent
714*5113495bSYour Name * @transfer_id : transfer id
715*5113495bSYour Name * @len : Downloaded length
716*5113495bSYour Name *
717*5113495bSYour Name * Function:
718*5113495bSYour Name * 1. Send one msdu
 * 2. Increment write index of src ring accordingly.
720*5113495bSYour Name *
721*5113495bSYour Name * Return: QDF_STATUS: CE sent status
722*5113495bSYour Name */
QDF_STATUS ce_send_single(struct CE_handle *ce_tx_hdl, qdf_nbuf_t msdu,
		uint32_t transfer_id, u_int32_t len)
{
	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
	struct hif_softc *scn = ce_state->scn;
	struct CE_ring_state *src_ring = ce_state->src_ring;
	uint32_t ctrl_addr = ce_state->ctrl_addr;
	/*A_target_id_t targid = TARGID(scn);*/

	uint32_t nentries_mask = src_ring->nentries_mask;
	uint32_t sw_index, write_index;

	struct CE_src_desc *src_desc_base =
		(struct CE_src_desc *)src_ring->base_addr_owner_space;
	uint32_t *src_desc;

	/* Local scratch descriptor; word 1 is copied into the ring below */
	struct CE_src_desc lsrc_desc = {0};
	enum hif_ce_event_type event_type;

	/* Refresh the cached sw (read) index before checking free space */
	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* Need at least one free slot in the source ring */
	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index,
					sw_index-1) < 1)) {
		hif_err("ce send fail %d %d %d", nentries_mask,
				write_index, sw_index);
		return QDF_STATUS_E_RESOURCES;
	}

	src_desc = (uint32_t *)CE_SRC_RING_TO_DESC(src_desc_base, write_index);

	/* Word 0: DMA address of the buffer */
	src_desc[0] = qdf_nbuf_get_frag_paddr(msdu, 0);

	lsrc_desc.meta_data = transfer_id;
	lsrc_desc.nbytes = len;
	/* Data packet is a byte stream, so disable byte swap */
	lsrc_desc.byte_swap = AH_NEED_TX_DATA_SWAP;
	lsrc_desc.gather = 0; /* For the last one, gather is not set */

	/* Word 1: nbytes/meta_data/flags packed via lsrc_desc */
	src_desc[1] = ((uint32_t *)&lsrc_desc)[1];


	src_ring->per_transfer_context[write_index] = msdu;

	/* Classify the event for the CE descriptor history */
	if (((struct CE_src_desc *)src_desc)->gather)
		event_type = HIF_TX_GATHER_DESC_POST;
	else if (qdf_unlikely(ce_state->state != CE_RUNNING))
		event_type = HIF_TX_DESC_SOFTWARE_POST;
	else
		event_type = HIF_TX_DESC_POST;

	hif_record_ce_desc_event(scn, ce_state->id, event_type,
			(union ce_desc *)src_desc, msdu,
			write_index, len);

	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	src_ring->write_index = write_index;

	/* Publish the new write index to hardware */
	war_ce_src_ring_write_idx_set(scn, ctrl_addr, write_index);

	return QDF_STATUS_SUCCESS;
}
787*5113495bSYour Name
788*5113495bSYour Name /**
789*5113495bSYour Name * ce_recv_buf_enqueue() - enqueue a recv buffer into a copy engine
790*5113495bSYour Name * @copyeng: copy engine handle
791*5113495bSYour Name * @per_recv_context: virtual address of the nbuf
792*5113495bSYour Name * @buffer: physical address of the nbuf
793*5113495bSYour Name *
794*5113495bSYour Name * Return: QDF_STATUS_SUCCESS if the buffer is enqueued
795*5113495bSYour Name */
796*5113495bSYour Name QDF_STATUS
ce_recv_buf_enqueue(struct CE_handle * copyeng,void * per_recv_context,qdf_dma_addr_t buffer)797*5113495bSYour Name ce_recv_buf_enqueue(struct CE_handle *copyeng,
798*5113495bSYour Name void *per_recv_context, qdf_dma_addr_t buffer)
799*5113495bSYour Name {
800*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
801*5113495bSYour Name struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
802*5113495bSYour Name
803*5113495bSYour Name return hif_state->ce_services->ce_recv_buf_enqueue(copyeng,
804*5113495bSYour Name per_recv_context, buffer);
805*5113495bSYour Name }
806*5113495bSYour Name qdf_export_symbol(ce_recv_buf_enqueue);
807*5113495bSYour Name
808*5113495bSYour Name void
ce_send_watermarks_set(struct CE_handle * copyeng,unsigned int low_alert_nentries,unsigned int high_alert_nentries)809*5113495bSYour Name ce_send_watermarks_set(struct CE_handle *copyeng,
810*5113495bSYour Name unsigned int low_alert_nentries,
811*5113495bSYour Name unsigned int high_alert_nentries)
812*5113495bSYour Name {
813*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
814*5113495bSYour Name uint32_t ctrl_addr = CE_state->ctrl_addr;
815*5113495bSYour Name struct hif_softc *scn = CE_state->scn;
816*5113495bSYour Name
817*5113495bSYour Name CE_SRC_RING_LOWMARK_SET(scn, ctrl_addr, low_alert_nentries);
818*5113495bSYour Name CE_SRC_RING_HIGHMARK_SET(scn, ctrl_addr, high_alert_nentries);
819*5113495bSYour Name }
820*5113495bSYour Name
821*5113495bSYour Name void
ce_recv_watermarks_set(struct CE_handle * copyeng,unsigned int low_alert_nentries,unsigned int high_alert_nentries)822*5113495bSYour Name ce_recv_watermarks_set(struct CE_handle *copyeng,
823*5113495bSYour Name unsigned int low_alert_nentries,
824*5113495bSYour Name unsigned int high_alert_nentries)
825*5113495bSYour Name {
826*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
827*5113495bSYour Name uint32_t ctrl_addr = CE_state->ctrl_addr;
828*5113495bSYour Name struct hif_softc *scn = CE_state->scn;
829*5113495bSYour Name
830*5113495bSYour Name CE_DEST_RING_LOWMARK_SET(scn, ctrl_addr,
831*5113495bSYour Name low_alert_nentries);
832*5113495bSYour Name CE_DEST_RING_HIGHMARK_SET(scn, ctrl_addr,
833*5113495bSYour Name high_alert_nentries);
834*5113495bSYour Name }
835*5113495bSYour Name
ce_send_entries_avail(struct CE_handle * copyeng)836*5113495bSYour Name unsigned int ce_send_entries_avail(struct CE_handle *copyeng)
837*5113495bSYour Name {
838*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
839*5113495bSYour Name struct CE_ring_state *src_ring = CE_state->src_ring;
840*5113495bSYour Name unsigned int nentries_mask = src_ring->nentries_mask;
841*5113495bSYour Name unsigned int sw_index;
842*5113495bSYour Name unsigned int write_index;
843*5113495bSYour Name
844*5113495bSYour Name qdf_spin_lock(&CE_state->ce_index_lock);
845*5113495bSYour Name sw_index = src_ring->sw_index;
846*5113495bSYour Name write_index = src_ring->write_index;
847*5113495bSYour Name qdf_spin_unlock(&CE_state->ce_index_lock);
848*5113495bSYour Name
849*5113495bSYour Name return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
850*5113495bSYour Name }
851*5113495bSYour Name
ce_recv_entries_avail(struct CE_handle * copyeng)852*5113495bSYour Name unsigned int ce_recv_entries_avail(struct CE_handle *copyeng)
853*5113495bSYour Name {
854*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
855*5113495bSYour Name struct CE_ring_state *dest_ring = CE_state->dest_ring;
856*5113495bSYour Name unsigned int nentries_mask = dest_ring->nentries_mask;
857*5113495bSYour Name unsigned int sw_index;
858*5113495bSYour Name unsigned int write_index;
859*5113495bSYour Name
860*5113495bSYour Name qdf_spin_lock(&CE_state->ce_index_lock);
861*5113495bSYour Name sw_index = dest_ring->sw_index;
862*5113495bSYour Name write_index = dest_ring->write_index;
863*5113495bSYour Name qdf_spin_unlock(&CE_state->ce_index_lock);
864*5113495bSYour Name
865*5113495bSYour Name return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
866*5113495bSYour Name }
867*5113495bSYour Name
868*5113495bSYour Name /*
869*5113495bSYour Name * Guts of ce_completed_recv_next.
870*5113495bSYour Name * The caller takes responsibility for any necessary locking.
871*5113495bSYour Name */
872*5113495bSYour Name QDF_STATUS
ce_completed_recv_next(struct CE_handle * copyeng,void ** per_CE_contextp,void ** per_transfer_contextp,qdf_dma_addr_t * bufferp,unsigned int * nbytesp,unsigned int * transfer_idp,unsigned int * flagsp)873*5113495bSYour Name ce_completed_recv_next(struct CE_handle *copyeng,
874*5113495bSYour Name void **per_CE_contextp,
875*5113495bSYour Name void **per_transfer_contextp,
876*5113495bSYour Name qdf_dma_addr_t *bufferp,
877*5113495bSYour Name unsigned int *nbytesp,
878*5113495bSYour Name unsigned int *transfer_idp, unsigned int *flagsp)
879*5113495bSYour Name {
880*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
881*5113495bSYour Name QDF_STATUS status;
882*5113495bSYour Name struct hif_softc *scn = CE_state->scn;
883*5113495bSYour Name struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
884*5113495bSYour Name struct ce_ops *ce_services;
885*5113495bSYour Name
886*5113495bSYour Name ce_services = hif_state->ce_services;
887*5113495bSYour Name qdf_spin_lock_bh(&CE_state->ce_index_lock);
888*5113495bSYour Name status =
889*5113495bSYour Name ce_services->ce_completed_recv_next_nolock(CE_state,
890*5113495bSYour Name per_CE_contextp, per_transfer_contextp, bufferp,
891*5113495bSYour Name nbytesp, transfer_idp, flagsp);
892*5113495bSYour Name qdf_spin_unlock_bh(&CE_state->ce_index_lock);
893*5113495bSYour Name
894*5113495bSYour Name return status;
895*5113495bSYour Name }
896*5113495bSYour Name
897*5113495bSYour Name QDF_STATUS
ce_revoke_recv_next(struct CE_handle * copyeng,void ** per_CE_contextp,void ** per_transfer_contextp,qdf_dma_addr_t * bufferp)898*5113495bSYour Name ce_revoke_recv_next(struct CE_handle *copyeng,
899*5113495bSYour Name void **per_CE_contextp,
900*5113495bSYour Name void **per_transfer_contextp, qdf_dma_addr_t *bufferp)
901*5113495bSYour Name {
902*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
903*5113495bSYour Name struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
904*5113495bSYour Name
905*5113495bSYour Name return hif_state->ce_services->ce_revoke_recv_next(copyeng,
906*5113495bSYour Name per_CE_contextp, per_transfer_contextp, bufferp);
907*5113495bSYour Name }
908*5113495bSYour Name
909*5113495bSYour Name QDF_STATUS
ce_cancel_send_next(struct CE_handle * copyeng,void ** per_CE_contextp,void ** per_transfer_contextp,qdf_dma_addr_t * bufferp,unsigned int * nbytesp,unsigned int * transfer_idp,uint32_t * toeplitz_hash_result)910*5113495bSYour Name ce_cancel_send_next(struct CE_handle *copyeng,
911*5113495bSYour Name void **per_CE_contextp,
912*5113495bSYour Name void **per_transfer_contextp,
913*5113495bSYour Name qdf_dma_addr_t *bufferp,
914*5113495bSYour Name unsigned int *nbytesp,
915*5113495bSYour Name unsigned int *transfer_idp,
916*5113495bSYour Name uint32_t *toeplitz_hash_result)
917*5113495bSYour Name {
918*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
919*5113495bSYour Name struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(CE_state->scn);
920*5113495bSYour Name
921*5113495bSYour Name return hif_state->ce_services->ce_cancel_send_next
922*5113495bSYour Name (copyeng, per_CE_contextp, per_transfer_contextp,
923*5113495bSYour Name bufferp, nbytesp, transfer_idp, toeplitz_hash_result);
924*5113495bSYour Name }
925*5113495bSYour Name qdf_export_symbol(ce_cancel_send_next);
926*5113495bSYour Name
927*5113495bSYour Name QDF_STATUS
ce_completed_send_next(struct CE_handle * copyeng,void ** per_CE_contextp,void ** per_transfer_contextp,qdf_dma_addr_t * bufferp,unsigned int * nbytesp,unsigned int * transfer_idp,unsigned int * sw_idx,unsigned int * hw_idx,unsigned int * toeplitz_hash_result)928*5113495bSYour Name ce_completed_send_next(struct CE_handle *copyeng,
929*5113495bSYour Name void **per_CE_contextp,
930*5113495bSYour Name void **per_transfer_contextp,
931*5113495bSYour Name qdf_dma_addr_t *bufferp,
932*5113495bSYour Name unsigned int *nbytesp,
933*5113495bSYour Name unsigned int *transfer_idp,
934*5113495bSYour Name unsigned int *sw_idx,
935*5113495bSYour Name unsigned int *hw_idx,
936*5113495bSYour Name unsigned int *toeplitz_hash_result)
937*5113495bSYour Name {
938*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
939*5113495bSYour Name struct hif_softc *scn = CE_state->scn;
940*5113495bSYour Name struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
941*5113495bSYour Name struct ce_ops *ce_services;
942*5113495bSYour Name QDF_STATUS status;
943*5113495bSYour Name
944*5113495bSYour Name ce_services = hif_state->ce_services;
945*5113495bSYour Name qdf_spin_lock_bh(&CE_state->ce_index_lock);
946*5113495bSYour Name status =
947*5113495bSYour Name ce_services->ce_completed_send_next_nolock(CE_state,
948*5113495bSYour Name per_CE_contextp, per_transfer_contextp,
949*5113495bSYour Name bufferp, nbytesp, transfer_idp, sw_idx,
950*5113495bSYour Name hw_idx, toeplitz_hash_result);
951*5113495bSYour Name qdf_spin_unlock_bh(&CE_state->ce_index_lock);
952*5113495bSYour Name
953*5113495bSYour Name return status;
954*5113495bSYour Name }
955*5113495bSYour Name
956*5113495bSYour Name #ifdef ATH_11AC_TXCOMPACT
/* CE engine descriptor reap
 * Similar to ce_per_engine_service; the only difference is that
 * ce_per_engine_service also performs receive processing and reaping of
 * completed descriptors, while this function only reaps Tx completion
 * descriptors. It is called from the threshold reap poll routine
 * hif_send_complete_check, so it must not contain any receive
 * functionality.
 */
965*5113495bSYour Name
void ce_per_engine_servicereap(struct hif_softc *scn, unsigned int ce_id)
{
	void *CE_context;
	void *transfer_context;
	qdf_dma_addr_t buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int sw_idx, hw_idx;
	uint32_t toeplitz_hash_result;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	/* Bail out if the target cannot be accessed right now */
	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
			NULL, NULL, 0, 0);

	/* Since this function is called from both user context and
	 * tasklet context the spinlock has to lock the bottom halves.
	 * This fix assumes that ATH_11AC_TXCOMPACT flag is always
	 * enabled in TX polling mode. If this is not the case, more
	 * bottom halve spin lock changes are needed. Due to data path
	 * performance concern, after internal discussion we've decided
	 * to make minimum change, i.e., only address the issue occurred
	 * in this function. The possible negative effect of this minimum
	 * change is that, in the future, if some other function will also
	 * be opened to let the user context to use, those cases need to be
	 * addressed by change spin_lock to spin_lock_bh also.
	 */

	qdf_spin_lock_bh(&CE_state->ce_index_lock);

	if (CE_state->send_cb) {
		{
			struct ce_ops *ce_services = hif_state->ce_services;
			/* Pop completed send buffers and call the
			 * registered send callback for each
			 */
			while (ce_services->ce_completed_send_next_nolock
				(CE_state, &CE_context,
				&transfer_context, &buf,
				&nbytes, &id, &sw_idx, &hw_idx,
				&toeplitz_hash_result) ==
				QDF_STATUS_SUCCESS) {
				if (ce_id != CE_HTT_H2T_MSG) {
					/* Drop the index lock across the
					 * callback so it may post new
					 * buffers without deadlocking.
					 */
					qdf_spin_unlock_bh(
						&CE_state->ce_index_lock);
					CE_state->send_cb(
						(struct CE_handle *)
						CE_state, CE_context,
						transfer_context, buf,
						nbytes, id, sw_idx, hw_idx,
						toeplitz_hash_result);
					qdf_spin_lock_bh(
						&CE_state->ce_index_lock);
				} else {
					/* HTT H2T: no callback; just
					 * return the send credit to the
					 * pipe under its freeq lock.
					 */
					struct HIF_CE_pipe_info *pipe_info =
						(struct HIF_CE_pipe_info *)
						CE_context;

					qdf_spin_lock_bh(&pipe_info->
						 completion_freeq_lock);
					pipe_info->num_sends_allowed++;
					qdf_spin_unlock_bh(&pipe_info->
						   completion_freeq_lock);
				}
			}
		}
	}

	qdf_spin_unlock_bh(&CE_state->ce_index_lock);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
			NULL, NULL, 0, 0);
	Q_TARGET_ACCESS_END(scn);
}
1043*5113495bSYour Name
1044*5113495bSYour Name #endif /*ATH_11AC_TXCOMPACT */
1045*5113495bSYour Name
1046*5113495bSYour Name #ifdef ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST
check_ce_id_and_epping_enabled(int CE_id,uint32_t mode)1047*5113495bSYour Name static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
1048*5113495bSYour Name {
1049*5113495bSYour Name // QDF_IS_EPPING_ENABLED is pre lithium feature
1050*5113495bSYour Name // CE4 completion is enabled only lithium and later
1051*5113495bSYour Name // so no need to check for EPPING
1052*5113495bSYour Name return true;
1053*5113495bSYour Name }
1054*5113495bSYour Name
1055*5113495bSYour Name #else /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1056*5113495bSYour Name
check_ce_id_and_epping_enabled(int CE_id,uint32_t mode)1057*5113495bSYour Name static inline bool check_ce_id_and_epping_enabled(int CE_id, uint32_t mode)
1058*5113495bSYour Name {
1059*5113495bSYour Name if (CE_id != CE_HTT_H2T_MSG || QDF_IS_EPPING_ENABLED(mode))
1060*5113495bSYour Name return true;
1061*5113495bSYour Name else
1062*5113495bSYour Name return false;
1063*5113495bSYour Name }
1064*5113495bSYour Name
1065*5113495bSYour Name #endif /* ENABLE_CE4_COMP_DISABLE_HTT_HTC_MISC_LIST */
1066*5113495bSYour Name
1067*5113495bSYour Name /*
1068*5113495bSYour Name * ce_engine_service_reg:
1069*5113495bSYour Name *
1070*5113495bSYour Name * Called from ce_per_engine_service and goes through the regular interrupt
1071*5113495bSYour Name * handling that does not involve the WLAN fast path feature.
1072*5113495bSYour Name *
1073*5113495bSYour Name * Returns void
1074*5113495bSYour Name */
ce_engine_service_reg(struct hif_softc * scn,int CE_id)1075*5113495bSYour Name void ce_engine_service_reg(struct hif_softc *scn, int CE_id)
1076*5113495bSYour Name {
1077*5113495bSYour Name struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1078*5113495bSYour Name uint32_t ctrl_addr = CE_state->ctrl_addr;
1079*5113495bSYour Name struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1080*5113495bSYour Name void *CE_context;
1081*5113495bSYour Name void *transfer_context;
1082*5113495bSYour Name qdf_dma_addr_t buf;
1083*5113495bSYour Name unsigned int nbytes;
1084*5113495bSYour Name unsigned int id;
1085*5113495bSYour Name unsigned int flags;
1086*5113495bSYour Name unsigned int more_comp_cnt = 0;
1087*5113495bSYour Name unsigned int more_snd_comp_cnt = 0;
1088*5113495bSYour Name unsigned int sw_idx, hw_idx;
1089*5113495bSYour Name uint32_t toeplitz_hash_result;
1090*5113495bSYour Name uint32_t mode = hif_get_conparam(scn);
1091*5113495bSYour Name
1092*5113495bSYour Name more_completions:
1093*5113495bSYour Name if (CE_state->recv_cb) {
1094*5113495bSYour Name
1095*5113495bSYour Name /* Pop completed recv buffers and call
1096*5113495bSYour Name * the registered recv callback for each
1097*5113495bSYour Name */
1098*5113495bSYour Name while (hif_state->ce_services->ce_completed_recv_next_nolock
1099*5113495bSYour Name (CE_state, &CE_context, &transfer_context,
1100*5113495bSYour Name &buf, &nbytes, &id, &flags) ==
1101*5113495bSYour Name QDF_STATUS_SUCCESS) {
1102*5113495bSYour Name qdf_spin_unlock(&CE_state->ce_index_lock);
1103*5113495bSYour Name CE_state->recv_cb((struct CE_handle *)CE_state,
1104*5113495bSYour Name CE_context, transfer_context, buf,
1105*5113495bSYour Name nbytes, id, flags);
1106*5113495bSYour Name
1107*5113495bSYour Name qdf_spin_lock(&CE_state->ce_index_lock);
1108*5113495bSYour Name /*
1109*5113495bSYour Name * EV #112693 -
1110*5113495bSYour Name * [Peregrine][ES1][WB342][Win8x86][Performance]
1111*5113495bSYour Name * BSoD_0x133 occurred in VHT80 UDP_DL
1112*5113495bSYour Name * Break out DPC by force if number of loops in
1113*5113495bSYour Name * hif_pci_ce_recv_data reaches MAX_NUM_OF_RECEIVES
1114*5113495bSYour Name * to avoid spending too long time in
1115*5113495bSYour Name * DPC for each interrupt handling. Schedule another
1116*5113495bSYour Name * DPC to avoid data loss if we had taken
1117*5113495bSYour Name * force-break action before apply to Windows OS
1118*5113495bSYour Name * only currently, Linux/MAC os can expand to their
1119*5113495bSYour Name * platform if necessary
1120*5113495bSYour Name */
1121*5113495bSYour Name
1122*5113495bSYour Name /* Break the receive processes by
1123*5113495bSYour Name * force if force_break set up
1124*5113495bSYour Name */
1125*5113495bSYour Name if (qdf_unlikely(CE_state->force_break)) {
1126*5113495bSYour Name qdf_atomic_set(&CE_state->rx_pending, 1);
1127*5113495bSYour Name return;
1128*5113495bSYour Name }
1129*5113495bSYour Name }
1130*5113495bSYour Name }
1131*5113495bSYour Name
1132*5113495bSYour Name /*
1133*5113495bSYour Name * Attention: We may experience potential infinite loop for below
1134*5113495bSYour Name * While Loop during Sending Stress test.
1135*5113495bSYour Name * Resolve the same way as Receive Case (Refer to EV #112693)
1136*5113495bSYour Name */
1137*5113495bSYour Name
1138*5113495bSYour Name if (CE_state->send_cb) {
1139*5113495bSYour Name /* Pop completed send buffers and call
1140*5113495bSYour Name * the registered send callback for each
1141*5113495bSYour Name */
1142*5113495bSYour Name
1143*5113495bSYour Name #ifdef ATH_11AC_TXCOMPACT
1144*5113495bSYour Name while (hif_state->ce_services->ce_completed_send_next_nolock
1145*5113495bSYour Name (CE_state, &CE_context,
1146*5113495bSYour Name &transfer_context, &buf, &nbytes,
1147*5113495bSYour Name &id, &sw_idx, &hw_idx,
1148*5113495bSYour Name &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1149*5113495bSYour Name
1150*5113495bSYour Name if (check_ce_id_and_epping_enabled(CE_id, mode)) {
1151*5113495bSYour Name qdf_spin_unlock(&CE_state->ce_index_lock);
1152*5113495bSYour Name CE_state->send_cb((struct CE_handle *)CE_state,
1153*5113495bSYour Name CE_context, transfer_context,
1154*5113495bSYour Name buf, nbytes, id, sw_idx,
1155*5113495bSYour Name hw_idx, toeplitz_hash_result);
1156*5113495bSYour Name qdf_spin_lock(&CE_state->ce_index_lock);
1157*5113495bSYour Name } else {
1158*5113495bSYour Name struct HIF_CE_pipe_info *pipe_info =
1159*5113495bSYour Name (struct HIF_CE_pipe_info *)CE_context;
1160*5113495bSYour Name
1161*5113495bSYour Name qdf_spin_lock_bh(&pipe_info->
1162*5113495bSYour Name completion_freeq_lock);
1163*5113495bSYour Name pipe_info->num_sends_allowed++;
1164*5113495bSYour Name qdf_spin_unlock_bh(&pipe_info->
1165*5113495bSYour Name completion_freeq_lock);
1166*5113495bSYour Name }
1167*5113495bSYour Name }
1168*5113495bSYour Name #else /*ATH_11AC_TXCOMPACT */
1169*5113495bSYour Name while (hif_state->ce_services->ce_completed_send_next_nolock
1170*5113495bSYour Name (CE_state, &CE_context,
1171*5113495bSYour Name &transfer_context, &buf, &nbytes,
1172*5113495bSYour Name &id, &sw_idx, &hw_idx,
1173*5113495bSYour Name &toeplitz_hash_result) == QDF_STATUS_SUCCESS) {
1174*5113495bSYour Name qdf_spin_unlock(&CE_state->ce_index_lock);
1175*5113495bSYour Name CE_state->send_cb((struct CE_handle *)CE_state,
1176*5113495bSYour Name CE_context, transfer_context, buf,
1177*5113495bSYour Name nbytes, id, sw_idx, hw_idx,
1178*5113495bSYour Name toeplitz_hash_result);
1179*5113495bSYour Name qdf_spin_lock(&CE_state->ce_index_lock);
1180*5113495bSYour Name }
1181*5113495bSYour Name #endif /*ATH_11AC_TXCOMPACT */
1182*5113495bSYour Name }
1183*5113495bSYour Name
1184*5113495bSYour Name more_watermarks:
1185*5113495bSYour Name if (CE_state->misc_cbs) {
1186*5113495bSYour Name if (CE_state->watermark_cb &&
1187*5113495bSYour Name hif_state->ce_services->watermark_int(CE_state,
1188*5113495bSYour Name &flags)) {
1189*5113495bSYour Name qdf_spin_unlock(&CE_state->ce_index_lock);
1190*5113495bSYour Name /* Convert HW IS bits to software flags */
1191*5113495bSYour Name CE_state->watermark_cb((struct CE_handle *)CE_state,
1192*5113495bSYour Name CE_state->wm_context, flags);
1193*5113495bSYour Name qdf_spin_lock(&CE_state->ce_index_lock);
1194*5113495bSYour Name }
1195*5113495bSYour Name }
1196*5113495bSYour Name
1197*5113495bSYour Name /*
1198*5113495bSYour Name * Clear the misc interrupts (watermark) that were handled above,
1199*5113495bSYour Name * and that will be checked again below.
1200*5113495bSYour Name * Clear and check for copy-complete interrupts again, just in case
1201*5113495bSYour Name * more copy completions happened while the misc interrupts were being
1202*5113495bSYour Name * handled.
1203*5113495bSYour Name */
1204*5113495bSYour Name if (!ce_srng_based(scn) && !CE_state->msi_supported) {
1205*5113495bSYour Name if (TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
1206*5113495bSYour Name CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
1207*5113495bSYour Name CE_WATERMARK_MASK |
1208*5113495bSYour Name HOST_IS_COPY_COMPLETE_MASK);
1209*5113495bSYour Name } else {
1210*5113495bSYour Name qdf_atomic_set(&CE_state->rx_pending, 0);
1211*5113495bSYour Name hif_err_rl("%s: target access is not allowed",
1212*5113495bSYour Name __func__);
1213*5113495bSYour Name return;
1214*5113495bSYour Name }
1215*5113495bSYour Name }
1216*5113495bSYour Name
1217*5113495bSYour Name /*
1218*5113495bSYour Name * Now that per-engine interrupts are cleared, verify that
1219*5113495bSYour Name * no recv interrupts arrive while processing send interrupts,
1220*5113495bSYour Name * and no recv or send interrupts happened while processing
1221*5113495bSYour Name * misc interrupts.Go back and check again.Keep checking until
1222*5113495bSYour Name * we find no more events to process.
1223*5113495bSYour Name */
1224*5113495bSYour Name if (CE_state->recv_cb &&
1225*5113495bSYour Name hif_state->ce_services->ce_recv_entries_done_nolock(scn,
1226*5113495bSYour Name CE_state)) {
1227*5113495bSYour Name if (QDF_IS_EPPING_ENABLED(mode) ||
1228*5113495bSYour Name more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1229*5113495bSYour Name goto more_completions;
1230*5113495bSYour Name } else {
1231*5113495bSYour Name if (!ce_srng_based(scn) &&
1232*5113495bSYour Name !CE_state->batch_intr_supported) {
1233*5113495bSYour Name hif_err_rl(
1234*5113495bSYour Name "Potential infinite loop detected during Rx processing id:%u nentries_mask:0x%x sw read_idx:0x%x hw read_idx:0x%x",
1235*5113495bSYour Name CE_state->id,
1236*5113495bSYour Name CE_state->dest_ring->nentries_mask,
1237*5113495bSYour Name CE_state->dest_ring->sw_index,
1238*5113495bSYour Name CE_DEST_RING_READ_IDX_GET(scn,
1239*5113495bSYour Name CE_state->ctrl_addr));
1240*5113495bSYour Name }
1241*5113495bSYour Name }
1242*5113495bSYour Name }
1243*5113495bSYour Name
1244*5113495bSYour Name if (CE_state->send_cb &&
1245*5113495bSYour Name hif_state->ce_services->ce_send_entries_done_nolock(scn,
1246*5113495bSYour Name CE_state)) {
1247*5113495bSYour Name if (QDF_IS_EPPING_ENABLED(mode) ||
1248*5113495bSYour Name more_snd_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
1249*5113495bSYour Name goto more_completions;
1250*5113495bSYour Name } else {
1251*5113495bSYour Name if (!ce_srng_based(scn) &&
1252*5113495bSYour Name !CE_state->batch_intr_supported) {
1253*5113495bSYour Name hif_err_rl(
1254*5113495bSYour Name "Potential infinite loop detected during send completion id:%u mask:0x%x sw read_idx:0x%x hw_index:0x%x write_index: 0x%x hw read_idx:0x%x",
1255*5113495bSYour Name CE_state->id,
1256*5113495bSYour Name CE_state->src_ring->nentries_mask,
1257*5113495bSYour Name CE_state->src_ring->sw_index,
1258*5113495bSYour Name CE_state->src_ring->hw_index,
1259*5113495bSYour Name CE_state->src_ring->write_index,
1260*5113495bSYour Name CE_SRC_RING_READ_IDX_GET(scn,
1261*5113495bSYour Name CE_state->ctrl_addr));
1262*5113495bSYour Name }
1263*5113495bSYour Name }
1264*5113495bSYour Name }
1265*5113495bSYour Name
1266*5113495bSYour Name if (CE_state->misc_cbs && CE_state->watermark_cb) {
1267*5113495bSYour Name if (hif_state->ce_services->watermark_int(CE_state, &flags))
1268*5113495bSYour Name goto more_watermarks;
1269*5113495bSYour Name }
1270*5113495bSYour Name
1271*5113495bSYour Name qdf_atomic_set(&CE_state->rx_pending, 0);
1272*5113495bSYour Name }
1273*5113495bSYour Name
#ifdef WLAN_TRACEPOINTS
/**
 * ce_trace_tasklet_sched_latency() - Trace ce tasklet scheduling
 * latency
 * @ce_state: CE context
 *
 * Emits a tracepoint carrying the delta between the time the CE
 * tasklet was scheduled (ce_tasklet_sched_time) and the time its
 * service routine actually started (ce_service_start_time).
 * Caller must have already stamped ce_service_start_time.
 *
 * Return: None
 */
static inline
void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
{
	qdf_trace_dp_ce_tasklet_sched_latency(ce_state->id,
					      ce_state->ce_service_start_time -
					      ce_state->ce_tasklet_sched_time);
}
#else
/* Stub: compiles to nothing when tracepoints are disabled */
static inline
void ce_trace_tasklet_sched_latency(struct CE_state *ce_state)
{
}
#endif
1295*5113495bSYour Name
/**
 * ce_per_engine_service() - guts of interrupt handler for per-engine
 * interrupts on a particular CE
 * @scn: HIF context
 * @CE_id: which copy engine to service
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks by dispatching to the CE's
 * service routine under the CE index lock.
 *
 * Return: number of messages processed
 */
int ce_per_engine_service(struct hif_softc *scn, unsigned int CE_id)
{
	struct CE_state *CE_state = scn->ce_id_to_state[CE_id];

	/* HTT rx-data CEs are owned by NSS offload; nothing to do here */
	if (hif_is_nss_wifi_enabled(scn) && (CE_state->htt_rx_data))
		return CE_state->receive_count;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0) {
		hif_err("[premature rc=0]");
		return 0; /* no work done */
	}

	/* Clear force_break flag and re-initialize receive_count to 0 */
	CE_state->receive_count = 0;
	CE_state->force_break = 0;
	/* Compute the deadline after which the service loop must yield */
	CE_state->ce_service_start_time = qdf_time_sched_clock();
	CE_state->ce_service_yield_time =
		CE_state->ce_service_start_time +
		hif_get_ce_service_max_yield_time(
			(struct hif_opaque_softc *)scn);

	ce_trace_tasklet_sched_latency(CE_state);

	qdf_spin_lock(&CE_state->ce_index_lock);

	CE_state->service(scn, CE_id);

	qdf_spin_unlock(&CE_state->ce_index_lock);

	if (Q_TARGET_ACCESS_END(scn) < 0)
		hif_err("<--[premature rc=%d]", CE_state->receive_count);
	return CE_state->receive_count;
}
qdf_export_symbol(ce_per_engine_service);
1338*5113495bSYour Name
1339*5113495bSYour Name /*
1340*5113495bSYour Name * Handler for per-engine interrupts on ALL active CEs.
1341*5113495bSYour Name * This is used in cases where the system is sharing a
1342*5113495bSYour Name * single interrupt for all CEs
1343*5113495bSYour Name */
1344*5113495bSYour Name
ce_per_engine_service_any(int irq,struct hif_softc * scn)1345*5113495bSYour Name void ce_per_engine_service_any(int irq, struct hif_softc *scn)
1346*5113495bSYour Name {
1347*5113495bSYour Name int CE_id;
1348*5113495bSYour Name uint32_t intr_summary;
1349*5113495bSYour Name
1350*5113495bSYour Name if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1351*5113495bSYour Name return;
1352*5113495bSYour Name
1353*5113495bSYour Name if (!qdf_atomic_read(&scn->tasklet_from_intr)) {
1354*5113495bSYour Name for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1355*5113495bSYour Name struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1356*5113495bSYour Name
1357*5113495bSYour Name if (qdf_atomic_read(&CE_state->rx_pending)) {
1358*5113495bSYour Name qdf_atomic_set(&CE_state->rx_pending, 0);
1359*5113495bSYour Name ce_per_engine_service(scn, CE_id);
1360*5113495bSYour Name }
1361*5113495bSYour Name }
1362*5113495bSYour Name
1363*5113495bSYour Name Q_TARGET_ACCESS_END(scn);
1364*5113495bSYour Name return;
1365*5113495bSYour Name }
1366*5113495bSYour Name
1367*5113495bSYour Name intr_summary = CE_INTERRUPT_SUMMARY(scn);
1368*5113495bSYour Name
1369*5113495bSYour Name for (CE_id = 0; intr_summary && (CE_id < scn->ce_count); CE_id++) {
1370*5113495bSYour Name if (intr_summary & (1 << CE_id))
1371*5113495bSYour Name intr_summary &= ~(1 << CE_id);
1372*5113495bSYour Name else
1373*5113495bSYour Name continue; /* no intr pending on this CE */
1374*5113495bSYour Name
1375*5113495bSYour Name ce_per_engine_service(scn, CE_id);
1376*5113495bSYour Name }
1377*5113495bSYour Name
1378*5113495bSYour Name Q_TARGET_ACCESS_END(scn);
1379*5113495bSYour Name }
1380*5113495bSYour Name
1381*5113495bSYour Name /*Iterate the CE_state list and disable the compl interrupt
1382*5113495bSYour Name * if it has been registered already.
1383*5113495bSYour Name */
ce_disable_any_copy_compl_intr_nolock(struct hif_softc * scn)1384*5113495bSYour Name void ce_disable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1385*5113495bSYour Name {
1386*5113495bSYour Name int CE_id;
1387*5113495bSYour Name
1388*5113495bSYour Name if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1389*5113495bSYour Name return;
1390*5113495bSYour Name
1391*5113495bSYour Name for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1392*5113495bSYour Name struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1393*5113495bSYour Name uint32_t ctrl_addr = CE_state->ctrl_addr;
1394*5113495bSYour Name
1395*5113495bSYour Name /* if the interrupt is currently enabled, disable it */
1396*5113495bSYour Name if (!CE_state->disable_copy_compl_intr
1397*5113495bSYour Name && (CE_state->send_cb || CE_state->recv_cb))
1398*5113495bSYour Name CE_COPY_COMPLETE_INTR_DISABLE(scn, ctrl_addr);
1399*5113495bSYour Name
1400*5113495bSYour Name if (CE_state->watermark_cb)
1401*5113495bSYour Name CE_WATERMARK_INTR_DISABLE(scn, ctrl_addr);
1402*5113495bSYour Name }
1403*5113495bSYour Name Q_TARGET_ACCESS_END(scn);
1404*5113495bSYour Name }
1405*5113495bSYour Name
ce_enable_any_copy_compl_intr_nolock(struct hif_softc * scn)1406*5113495bSYour Name void ce_enable_any_copy_compl_intr_nolock(struct hif_softc *scn)
1407*5113495bSYour Name {
1408*5113495bSYour Name int CE_id;
1409*5113495bSYour Name
1410*5113495bSYour Name if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
1411*5113495bSYour Name return;
1412*5113495bSYour Name
1413*5113495bSYour Name for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1414*5113495bSYour Name struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1415*5113495bSYour Name uint32_t ctrl_addr = CE_state->ctrl_addr;
1416*5113495bSYour Name
1417*5113495bSYour Name /*
1418*5113495bSYour Name * If the CE is supposed to have copy complete interrupts
1419*5113495bSYour Name * enabled (i.e. there a callback registered, and the
1420*5113495bSYour Name * "disable" flag is not set), then re-enable the interrupt.
1421*5113495bSYour Name */
1422*5113495bSYour Name if (!CE_state->disable_copy_compl_intr
1423*5113495bSYour Name && (CE_state->send_cb || CE_state->recv_cb))
1424*5113495bSYour Name CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
1425*5113495bSYour Name
1426*5113495bSYour Name if (CE_state->watermark_cb)
1427*5113495bSYour Name CE_WATERMARK_INTR_ENABLE(scn, ctrl_addr);
1428*5113495bSYour Name }
1429*5113495bSYour Name Q_TARGET_ACCESS_END(scn);
1430*5113495bSYour Name }
1431*5113495bSYour Name
1432*5113495bSYour Name /**
1433*5113495bSYour Name * ce_send_cb_register(): register completion handler
1434*5113495bSYour Name * @copyeng: CE_state representing the ce we are adding the behavior to
1435*5113495bSYour Name * @fn_ptr: callback that the ce should use when processing tx completions
1436*5113495bSYour Name * @ce_send_context: context to pass back in the callback
1437*5113495bSYour Name * @disable_interrupts: if the interrupts should be enabled or not.
1438*5113495bSYour Name *
1439*5113495bSYour Name * Caller should guarantee that no transactions are in progress before
1440*5113495bSYour Name * switching the callback function.
1441*5113495bSYour Name *
1442*5113495bSYour Name * Registers the send context before the fn pointer so that if the cb is valid
1443*5113495bSYour Name * the context should be valid.
1444*5113495bSYour Name *
1445*5113495bSYour Name * Beware that currently this function will enable completion interrupts.
1446*5113495bSYour Name */
1447*5113495bSYour Name void
ce_send_cb_register(struct CE_handle * copyeng,ce_send_cb fn_ptr,void * ce_send_context,int disable_interrupts)1448*5113495bSYour Name ce_send_cb_register(struct CE_handle *copyeng,
1449*5113495bSYour Name ce_send_cb fn_ptr,
1450*5113495bSYour Name void *ce_send_context, int disable_interrupts)
1451*5113495bSYour Name {
1452*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
1453*5113495bSYour Name struct hif_softc *scn;
1454*5113495bSYour Name struct HIF_CE_state *hif_state;
1455*5113495bSYour Name
1456*5113495bSYour Name if (!CE_state) {
1457*5113495bSYour Name hif_err("Error CE state = NULL");
1458*5113495bSYour Name return;
1459*5113495bSYour Name }
1460*5113495bSYour Name scn = CE_state->scn;
1461*5113495bSYour Name hif_state = HIF_GET_CE_STATE(scn);
1462*5113495bSYour Name if (!hif_state) {
1463*5113495bSYour Name hif_err("Error HIF state = NULL");
1464*5113495bSYour Name return;
1465*5113495bSYour Name }
1466*5113495bSYour Name CE_state->send_context = ce_send_context;
1467*5113495bSYour Name CE_state->send_cb = fn_ptr;
1468*5113495bSYour Name hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1469*5113495bSYour Name disable_interrupts);
1470*5113495bSYour Name }
1471*5113495bSYour Name qdf_export_symbol(ce_send_cb_register);
1472*5113495bSYour Name
1473*5113495bSYour Name /**
1474*5113495bSYour Name * ce_recv_cb_register(): register completion handler
1475*5113495bSYour Name * @copyeng: CE_state representing the ce we are adding the behavior to
1476*5113495bSYour Name * @fn_ptr: callback that the ce should use when processing rx completions
1477*5113495bSYour Name * @CE_recv_context: context to pass back in the callback
1478*5113495bSYour Name * @disable_interrupts: if the interrupts should be enabled or not.
1479*5113495bSYour Name *
1480*5113495bSYour Name * Registers the send context before the fn pointer so that if the cb is valid
1481*5113495bSYour Name * the context should be valid.
1482*5113495bSYour Name *
1483*5113495bSYour Name * Caller should guarantee that no transactions are in progress before
1484*5113495bSYour Name * switching the callback function.
1485*5113495bSYour Name */
1486*5113495bSYour Name void
ce_recv_cb_register(struct CE_handle * copyeng,CE_recv_cb fn_ptr,void * CE_recv_context,int disable_interrupts)1487*5113495bSYour Name ce_recv_cb_register(struct CE_handle *copyeng,
1488*5113495bSYour Name CE_recv_cb fn_ptr,
1489*5113495bSYour Name void *CE_recv_context, int disable_interrupts)
1490*5113495bSYour Name {
1491*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
1492*5113495bSYour Name struct hif_softc *scn;
1493*5113495bSYour Name struct HIF_CE_state *hif_state;
1494*5113495bSYour Name
1495*5113495bSYour Name if (!CE_state) {
1496*5113495bSYour Name hif_err("ERROR CE state = NULL");
1497*5113495bSYour Name return;
1498*5113495bSYour Name }
1499*5113495bSYour Name scn = CE_state->scn;
1500*5113495bSYour Name hif_state = HIF_GET_CE_STATE(scn);
1501*5113495bSYour Name if (!hif_state) {
1502*5113495bSYour Name hif_err("Error HIF state = NULL");
1503*5113495bSYour Name return;
1504*5113495bSYour Name }
1505*5113495bSYour Name CE_state->recv_context = CE_recv_context;
1506*5113495bSYour Name CE_state->recv_cb = fn_ptr;
1507*5113495bSYour Name hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1508*5113495bSYour Name disable_interrupts);
1509*5113495bSYour Name }
1510*5113495bSYour Name qdf_export_symbol(ce_recv_cb_register);
1511*5113495bSYour Name
1512*5113495bSYour Name /**
1513*5113495bSYour Name * ce_watermark_cb_register(): register completion handler
1514*5113495bSYour Name * @copyeng: CE_state representing the ce we are adding the behavior to
1515*5113495bSYour Name * @fn_ptr: callback that the ce should use when processing watermark events
1516*5113495bSYour Name * @CE_wm_context: context to pass back in the callback
1517*5113495bSYour Name *
1518*5113495bSYour Name * Caller should guarantee that no watermark events are being processed before
1519*5113495bSYour Name * switching the callback function.
1520*5113495bSYour Name */
1521*5113495bSYour Name void
ce_watermark_cb_register(struct CE_handle * copyeng,CE_watermark_cb fn_ptr,void * CE_wm_context)1522*5113495bSYour Name ce_watermark_cb_register(struct CE_handle *copyeng,
1523*5113495bSYour Name CE_watermark_cb fn_ptr, void *CE_wm_context)
1524*5113495bSYour Name {
1525*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
1526*5113495bSYour Name struct hif_softc *scn = CE_state->scn;
1527*5113495bSYour Name struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1528*5113495bSYour Name
1529*5113495bSYour Name CE_state->watermark_cb = fn_ptr;
1530*5113495bSYour Name CE_state->wm_context = CE_wm_context;
1531*5113495bSYour Name hif_state->ce_services->ce_per_engine_handler_adjust(CE_state,
1532*5113495bSYour Name 0);
1533*5113495bSYour Name if (fn_ptr)
1534*5113495bSYour Name CE_state->misc_cbs = 1;
1535*5113495bSYour Name }
1536*5113495bSYour Name
1537*5113495bSYour Name #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
1538*5113495bSYour Name void
ce_register_custom_cb(struct CE_handle * copyeng,void (* custom_cb)(void *),void * custom_cb_context)1539*5113495bSYour Name ce_register_custom_cb(struct CE_handle *copyeng, void (*custom_cb)(void *),
1540*5113495bSYour Name void *custom_cb_context)
1541*5113495bSYour Name {
1542*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
1543*5113495bSYour Name
1544*5113495bSYour Name CE_state->custom_cb = custom_cb;
1545*5113495bSYour Name CE_state->custom_cb_context = custom_cb_context;
1546*5113495bSYour Name qdf_atomic_init(&CE_state->custom_cb_pending);
1547*5113495bSYour Name }
1548*5113495bSYour Name
1549*5113495bSYour Name void
ce_unregister_custom_cb(struct CE_handle * copyeng)1550*5113495bSYour Name ce_unregister_custom_cb(struct CE_handle *copyeng)
1551*5113495bSYour Name {
1552*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
1553*5113495bSYour Name
1554*5113495bSYour Name qdf_assert_always(!qdf_atomic_read(&CE_state->custom_cb_pending));
1555*5113495bSYour Name CE_state->custom_cb = NULL;
1556*5113495bSYour Name CE_state->custom_cb_context = NULL;
1557*5113495bSYour Name }
1558*5113495bSYour Name
1559*5113495bSYour Name void
ce_enable_custom_cb(struct CE_handle * copyeng)1560*5113495bSYour Name ce_enable_custom_cb(struct CE_handle *copyeng)
1561*5113495bSYour Name {
1562*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
1563*5113495bSYour Name int32_t custom_cb_pending;
1564*5113495bSYour Name
1565*5113495bSYour Name qdf_assert_always(CE_state->custom_cb);
1566*5113495bSYour Name qdf_assert_always(CE_state->custom_cb_context);
1567*5113495bSYour Name
1568*5113495bSYour Name custom_cb_pending = qdf_atomic_inc_return(&CE_state->custom_cb_pending);
1569*5113495bSYour Name qdf_assert_always(custom_cb_pending >= 1);
1570*5113495bSYour Name }
1571*5113495bSYour Name
1572*5113495bSYour Name void
ce_disable_custom_cb(struct CE_handle * copyeng)1573*5113495bSYour Name ce_disable_custom_cb(struct CE_handle *copyeng)
1574*5113495bSYour Name {
1575*5113495bSYour Name struct CE_state *CE_state = (struct CE_state *)copyeng;
1576*5113495bSYour Name
1577*5113495bSYour Name qdf_assert_always(CE_state->custom_cb);
1578*5113495bSYour Name qdf_assert_always(CE_state->custom_cb_context);
1579*5113495bSYour Name
1580*5113495bSYour Name qdf_atomic_dec_if_positive(&CE_state->custom_cb_pending);
1581*5113495bSYour Name }
1582*5113495bSYour Name #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
1583*5113495bSYour Name
ce_get_rx_pending(struct hif_softc * scn)1584*5113495bSYour Name bool ce_get_rx_pending(struct hif_softc *scn)
1585*5113495bSYour Name {
1586*5113495bSYour Name int CE_id;
1587*5113495bSYour Name
1588*5113495bSYour Name for (CE_id = 0; CE_id < scn->ce_count; CE_id++) {
1589*5113495bSYour Name struct CE_state *CE_state = scn->ce_id_to_state[CE_id];
1590*5113495bSYour Name
1591*5113495bSYour Name if (qdf_atomic_read(&CE_state->rx_pending))
1592*5113495bSYour Name return true;
1593*5113495bSYour Name }
1594*5113495bSYour Name
1595*5113495bSYour Name return false;
1596*5113495bSYour Name }
1597*5113495bSYour Name
1598*5113495bSYour Name /**
1599*5113495bSYour Name * ce_check_rx_pending() - ce_check_rx_pending
1600*5113495bSYour Name * @CE_state: context of the copy engine to check
1601*5113495bSYour Name *
1602*5113495bSYour Name * Return: true if there per_engine_service
1603*5113495bSYour Name * didn't process all the rx descriptors.
1604*5113495bSYour Name */
ce_check_rx_pending(struct CE_state * CE_state)1605*5113495bSYour Name bool ce_check_rx_pending(struct CE_state *CE_state)
1606*5113495bSYour Name {
1607*5113495bSYour Name if (qdf_atomic_read(&CE_state->rx_pending))
1608*5113495bSYour Name return true;
1609*5113495bSYour Name else
1610*5113495bSYour Name return false;
1611*5113495bSYour Name }
1612*5113495bSYour Name qdf_export_symbol(ce_check_rx_pending);
1613*5113495bSYour Name
1614*5113495bSYour Name #ifdef IPA_OFFLOAD
1615*5113495bSYour Name #ifdef QCN7605_SUPPORT
ce_ipa_get_wr_index_addr(struct CE_state * CE_state)1616*5113495bSYour Name static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1617*5113495bSYour Name {
1618*5113495bSYour Name u_int32_t ctrl_addr = CE_state->ctrl_addr;
1619*5113495bSYour Name struct hif_softc *scn = CE_state->scn;
1620*5113495bSYour Name qdf_dma_addr_t wr_index_addr;
1621*5113495bSYour Name
1622*5113495bSYour Name wr_index_addr = shadow_sr_wr_ind_addr(scn, ctrl_addr);
1623*5113495bSYour Name return wr_index_addr;
1624*5113495bSYour Name }
1625*5113495bSYour Name #else
ce_ipa_get_wr_index_addr(struct CE_state * CE_state)1626*5113495bSYour Name static qdf_dma_addr_t ce_ipa_get_wr_index_addr(struct CE_state *CE_state)
1627*5113495bSYour Name {
1628*5113495bSYour Name struct hif_softc *scn = CE_state->scn;
1629*5113495bSYour Name qdf_dma_addr_t wr_index_addr;
1630*5113495bSYour Name
1631*5113495bSYour Name wr_index_addr = CE_BASE_ADDRESS(CE_state->id) +
1632*5113495bSYour Name SR_WR_INDEX_ADDRESS;
1633*5113495bSYour Name return wr_index_addr;
1634*5113495bSYour Name }
1635*5113495bSYour Name #endif
1636*5113495bSYour Name
/**
 * ce_ipa_get_resource() - get uc resource on copyengine
 * @ce: copyengine context
 * @ce_sr: copyengine source ring resource info
 * @ce_sr_ring_size: copyengine source ring size
 * @ce_reg_paddr: copyengine register physical address
 *
 * Copy engine should release resource to micro controller
 * Micro controller needs
 * - Copy engine source descriptor base address
 * - Copy engine source descriptor size
 * - PCI BAR address to access copy engine register
 *
 * Return: None
 */
void ce_ipa_get_resource(struct CE_handle *ce,
			 qdf_shared_mem_t **ce_sr,
			 uint32_t *ce_sr_ring_size,
			 qdf_dma_addr_t *ce_reg_paddr)
{
	struct CE_state *CE_state = (struct CE_state *)ce;
	uint32_t ring_loop;
	struct CE_src_desc *ce_desc;
	qdf_dma_addr_t phy_mem_base;
	struct hif_softc *scn = CE_state->scn;

	/* CE not in use: report a zeroed ring DMA address and zero size */
	if (CE_UNUSED == CE_state->state) {
		*qdf_mem_get_dma_addr_ptr(scn->qdf_dev,
			&CE_state->scn->ipa_ce_ring->mem_info) = 0;
		*ce_sr_ring_size = 0;
		return;
	}

	/* Update default value for descriptor */
	for (ring_loop = 0; ring_loop < CE_state->src_ring->nentries;
	     ring_loop++) {
		/* Descriptors are laid out contiguously in owner space */
		ce_desc = (struct CE_src_desc *)
			  ((char *)CE_state->src_ring->base_addr_owner_space +
			   ring_loop * (sizeof(struct CE_src_desc)));
		CE_IPA_RING_INIT(ce_desc);
	}

	/* Get BAR address */
	hif_read_phy_mem_base(CE_state->scn, &phy_mem_base);

	*ce_sr = CE_state->scn->ipa_ce_ring;
	*ce_sr_ring_size = (uint32_t)(CE_state->src_ring->nentries *
		sizeof(struct CE_src_desc));
	/* Physical address the uC writes to advance the SR write index */
	*ce_reg_paddr = phy_mem_base + ce_ipa_get_wr_index_addr(CE_state);

}
1688*5113495bSYour Name
1689*5113495bSYour Name #endif /* IPA_OFFLOAD */
1690*5113495bSYour Name
#ifdef HIF_CE_DEBUG_DATA_BUF
/**
 * hif_dump_desc_data_buf() - record ce descriptor events
 * @buf: buffer to copy to
 * @pos: Current position till which the buf is filled
 * @data: Data to be copied
 * @data_len: Length of the data to be copied
 *
 * Return: updated position within @buf after appending the hex dump
 */
static uint32_t hif_dump_desc_data_buf(uint8_t *buf, ssize_t pos,
				       uint8_t *data, uint32_t data_len)
{
	pos += snprintf(buf + pos, PAGE_SIZE - pos, "Data:(Max%dBytes)\n",
			CE_DEBUG_MAX_DATA_BUF_SIZE);

	if ((data_len > 0) && data) {
		if (data_len < 16) {
			/*
			 * Dump only the bytes actually present; dumping a
			 * full CE_DEBUG_DATA_PER_ROW row here would read
			 * past the end of short buffers.
			 */
			hex_dump_to_buffer(data,
					   data_len,
					   16, 1, buf + pos,
					   (ssize_t)PAGE_SIZE - pos,
					   false);
			pos += CE_DEBUG_PRINT_BUF_SIZE(data_len);
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
		} else {
			/*
			 * Round up rather than unconditionally adding one:
			 * "(data_len / 16) + 1" produced a spurious extra
			 * row (reading past @data) whenever data_len was a
			 * multiple of 16.
			 */
			uint32_t rows = (data_len + 15) / 16;
			uint32_t row = 0;

			for (row = 0; row < rows; row++) {
				/* Clamp the final row to the bytes left */
				uint32_t rem = data_len - (row * 16);
				uint32_t row_len = rem < 16 ? rem : 16;

				hex_dump_to_buffer(data + (row * 16),
						   row_len,
						   16, 1, buf + pos,
						   (ssize_t)PAGE_SIZE
						   - pos, false);
				pos += CE_DEBUG_PRINT_BUF_SIZE(row_len);
				pos += snprintf(buf + pos, PAGE_SIZE - pos,
						"\n");
			}
		}
	}

	return pos;
}
#endif
1735*5113495bSYour Name
1736*5113495bSYour Name /*
1737*5113495bSYour Name * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
1738*5113495bSYour Name * for defined here
1739*5113495bSYour Name */
1740*5113495bSYour Name #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
ce_event_type_to_str(enum hif_ce_event_type type)1741*5113495bSYour Name static const char *ce_event_type_to_str(enum hif_ce_event_type type)
1742*5113495bSYour Name {
1743*5113495bSYour Name switch (type) {
1744*5113495bSYour Name case HIF_RX_DESC_POST:
1745*5113495bSYour Name return "HIF_RX_DESC_POST";
1746*5113495bSYour Name case HIF_RX_DESC_COMPLETION:
1747*5113495bSYour Name return "HIF_RX_DESC_COMPLETION";
1748*5113495bSYour Name case HIF_TX_GATHER_DESC_POST:
1749*5113495bSYour Name return "HIF_TX_GATHER_DESC_POST";
1750*5113495bSYour Name case HIF_TX_DESC_POST:
1751*5113495bSYour Name return "HIF_TX_DESC_POST";
1752*5113495bSYour Name case HIF_TX_DESC_SOFTWARE_POST:
1753*5113495bSYour Name return "HIF_TX_DESC_SOFTWARE_POST";
1754*5113495bSYour Name case HIF_TX_DESC_COMPLETION:
1755*5113495bSYour Name return "HIF_TX_DESC_COMPLETION";
1756*5113495bSYour Name case FAST_RX_WRITE_INDEX_UPDATE:
1757*5113495bSYour Name return "FAST_RX_WRITE_INDEX_UPDATE";
1758*5113495bSYour Name case FAST_RX_SOFTWARE_INDEX_UPDATE:
1759*5113495bSYour Name return "FAST_RX_SOFTWARE_INDEX_UPDATE";
1760*5113495bSYour Name case FAST_TX_WRITE_INDEX_UPDATE:
1761*5113495bSYour Name return "FAST_TX_WRITE_INDEX_UPDATE";
1762*5113495bSYour Name case FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE:
1763*5113495bSYour Name return "FAST_TX_WRITE_INDEX_SOFTWARE_UPDATE";
1764*5113495bSYour Name case FAST_TX_SOFTWARE_INDEX_UPDATE:
1765*5113495bSYour Name return "FAST_TX_SOFTWARE_INDEX_UPDATE";
1766*5113495bSYour Name case RESUME_WRITE_INDEX_UPDATE:
1767*5113495bSYour Name return "RESUME_WRITE_INDEX_UPDATE";
1768*5113495bSYour Name case HIF_IRQ_EVENT:
1769*5113495bSYour Name return "HIF_IRQ_EVENT";
1770*5113495bSYour Name case HIF_CE_TASKLET_ENTRY:
1771*5113495bSYour Name return "HIF_CE_TASKLET_ENTRY";
1772*5113495bSYour Name case HIF_CE_TASKLET_RESCHEDULE:
1773*5113495bSYour Name return "HIF_CE_TASKLET_RESCHEDULE";
1774*5113495bSYour Name case HIF_CE_TASKLET_EXIT:
1775*5113495bSYour Name return "HIF_CE_TASKLET_EXIT";
1776*5113495bSYour Name case HIF_CE_REAP_ENTRY:
1777*5113495bSYour Name return "HIF_CE_REAP_ENTRY";
1778*5113495bSYour Name case HIF_CE_REAP_EXIT:
1779*5113495bSYour Name return "HIF_CE_REAP_EXIT";
1780*5113495bSYour Name case NAPI_SCHEDULE:
1781*5113495bSYour Name return "NAPI_SCHEDULE";
1782*5113495bSYour Name case NAPI_POLL_ENTER:
1783*5113495bSYour Name return "NAPI_POLL_ENTER";
1784*5113495bSYour Name case NAPI_COMPLETE:
1785*5113495bSYour Name return "NAPI_COMPLETE";
1786*5113495bSYour Name case NAPI_POLL_EXIT:
1787*5113495bSYour Name return "NAPI_POLL_EXIT";
1788*5113495bSYour Name case HIF_RX_NBUF_ALLOC_FAILURE:
1789*5113495bSYour Name return "HIF_RX_NBUF_ALLOC_FAILURE";
1790*5113495bSYour Name case HIF_RX_NBUF_MAP_FAILURE:
1791*5113495bSYour Name return "HIF_RX_NBUF_MAP_FAILURE";
1792*5113495bSYour Name case HIF_RX_NBUF_ENQUEUE_FAILURE:
1793*5113495bSYour Name return "HIF_RX_NBUF_ENQUEUE_FAILURE";
1794*5113495bSYour Name default:
1795*5113495bSYour Name return "invalid";
1796*5113495bSYour Name }
1797*5113495bSYour Name }
1798*5113495bSYour Name
1799*5113495bSYour Name /**
1800*5113495bSYour Name * hif_dump_desc_event() - record ce descriptor events
1801*5113495bSYour Name * @scn: HIF context
1802*5113495bSYour Name * @buf: Buffer to which to be copied
1803*5113495bSYour Name */
hif_dump_desc_event(struct hif_softc * scn,char * buf)1804*5113495bSYour Name ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf)
1805*5113495bSYour Name {
1806*5113495bSYour Name struct hif_ce_desc_event *event;
1807*5113495bSYour Name uint64_t secs, usecs;
1808*5113495bSYour Name ssize_t len = 0;
1809*5113495bSYour Name struct ce_desc_hist *ce_hist = NULL;
1810*5113495bSYour Name struct hif_ce_desc_event *hist_ev = NULL;
1811*5113495bSYour Name
1812*5113495bSYour Name if (!scn)
1813*5113495bSYour Name return -EINVAL;
1814*5113495bSYour Name
1815*5113495bSYour Name ce_hist = &scn->hif_ce_desc_hist;
1816*5113495bSYour Name
1817*5113495bSYour Name if (ce_hist->hist_id >= CE_COUNT_MAX ||
1818*5113495bSYour Name ce_hist->hist_index >= HIF_CE_HISTORY_MAX) {
1819*5113495bSYour Name qdf_print("Invalid values");
1820*5113495bSYour Name return -EINVAL;
1821*5113495bSYour Name }
1822*5113495bSYour Name
1823*5113495bSYour Name hist_ev =
1824*5113495bSYour Name (struct hif_ce_desc_event *)ce_hist->hist_ev[ce_hist->hist_id];
1825*5113495bSYour Name
1826*5113495bSYour Name if (!hist_ev) {
1827*5113495bSYour Name qdf_print("Low Memory");
1828*5113495bSYour Name return -EINVAL;
1829*5113495bSYour Name }
1830*5113495bSYour Name
1831*5113495bSYour Name event = &hist_ev[ce_hist->hist_index];
1832*5113495bSYour Name
1833*5113495bSYour Name qdf_log_timestamp_to_secs(event->time, &secs, &usecs);
1834*5113495bSYour Name
1835*5113495bSYour Name len += snprintf(buf, PAGE_SIZE - len,
1836*5113495bSYour Name "\nTime:%lld.%06lld, CE:%d, EventType: %s, EventIndex: %d\nDataAddr=%pK",
1837*5113495bSYour Name secs, usecs, ce_hist->hist_id,
1838*5113495bSYour Name ce_event_type_to_str(event->type),
1839*5113495bSYour Name event->index, event->memory);
1840*5113495bSYour Name #ifdef HIF_CE_DEBUG_DATA_BUF
1841*5113495bSYour Name len += snprintf(buf + len, PAGE_SIZE - len, ", Data len=%zu",
1842*5113495bSYour Name event->actual_data_len);
1843*5113495bSYour Name #endif
1844*5113495bSYour Name
1845*5113495bSYour Name len += snprintf(buf + len, PAGE_SIZE - len, "\nCE descriptor: ");
1846*5113495bSYour Name
1847*5113495bSYour Name hex_dump_to_buffer(&event->descriptor, sizeof(union ce_desc),
1848*5113495bSYour Name 16, 1, buf + len,
1849*5113495bSYour Name (ssize_t)PAGE_SIZE - len, false);
1850*5113495bSYour Name len += CE_DEBUG_PRINT_BUF_SIZE(sizeof(union ce_desc));
1851*5113495bSYour Name len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1852*5113495bSYour Name
1853*5113495bSYour Name #ifdef HIF_CE_DEBUG_DATA_BUF
1854*5113495bSYour Name if (ce_hist->data_enable[ce_hist->hist_id])
1855*5113495bSYour Name len = hif_dump_desc_data_buf(buf, len, event->data,
1856*5113495bSYour Name (event->actual_data_len <
1857*5113495bSYour Name CE_DEBUG_MAX_DATA_BUF_SIZE) ?
1858*5113495bSYour Name event->actual_data_len :
1859*5113495bSYour Name CE_DEBUG_MAX_DATA_BUF_SIZE);
1860*5113495bSYour Name #endif /*HIF_CE_DEBUG_DATA_BUF*/
1861*5113495bSYour Name
1862*5113495bSYour Name len += snprintf(buf + len, PAGE_SIZE - len, "END\n");
1863*5113495bSYour Name
1864*5113495bSYour Name return len;
1865*5113495bSYour Name }
1866*5113495bSYour Name
1867*5113495bSYour Name /*
1868*5113495bSYour Name * hif_store_desc_trace_buf_index() -
1869*5113495bSYour Name * API to get the CE id and CE debug storage buffer index
1870*5113495bSYour Name *
1871*5113495bSYour Name * @dev: network device
1872*5113495bSYour Name * @attr: sysfs attribute
1873*5113495bSYour Name * @buf: data got from the user
1874*5113495bSYour Name *
1875*5113495bSYour Name * Return total length
1876*5113495bSYour Name */
hif_input_desc_trace_buf_index(struct hif_softc * scn,const char * buf,size_t size)1877*5113495bSYour Name ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
1878*5113495bSYour Name const char *buf, size_t size)
1879*5113495bSYour Name {
1880*5113495bSYour Name struct ce_desc_hist *ce_hist = NULL;
1881*5113495bSYour Name
1882*5113495bSYour Name if (!scn)
1883*5113495bSYour Name return -EINVAL;
1884*5113495bSYour Name
1885*5113495bSYour Name ce_hist = &scn->hif_ce_desc_hist;
1886*5113495bSYour Name
1887*5113495bSYour Name if (!size) {
1888*5113495bSYour Name qdf_nofl_err("%s: Invalid input buffer.", __func__);
1889*5113495bSYour Name return -EINVAL;
1890*5113495bSYour Name }
1891*5113495bSYour Name
1892*5113495bSYour Name if (sscanf(buf, "%u %u", (unsigned int *)&ce_hist->hist_id,
1893*5113495bSYour Name (unsigned int *)&ce_hist->hist_index) != 2) {
1894*5113495bSYour Name qdf_nofl_err("%s: Invalid input value.", __func__);
1895*5113495bSYour Name return -EINVAL;
1896*5113495bSYour Name }
1897*5113495bSYour Name if ((ce_hist->hist_id >= CE_COUNT_MAX) ||
1898*5113495bSYour Name (ce_hist->hist_index >= HIF_CE_HISTORY_MAX)) {
1899*5113495bSYour Name qdf_print("Invalid values");
1900*5113495bSYour Name return -EINVAL;
1901*5113495bSYour Name }
1902*5113495bSYour Name
1903*5113495bSYour Name return size;
1904*5113495bSYour Name }
1905*5113495bSYour Name
1906*5113495bSYour Name #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF) */
1907*5113495bSYour Name
1908*5113495bSYour Name #ifdef HIF_CE_DEBUG_DATA_BUF
1909*5113495bSYour Name /*
1910*5113495bSYour Name * hif_ce_en_desc_hist() -
1911*5113495bSYour Name * API to enable recording the CE desc history
1912*5113495bSYour Name *
1913*5113495bSYour Name * @dev: network device
1914*5113495bSYour Name * @attr: sysfs attribute
1915*5113495bSYour Name * @buf: buffer to copy the data.
1916*5113495bSYour Name *
1917*5113495bSYour Name * Starts recording the ce desc history
1918*5113495bSYour Name *
1919*5113495bSYour Name * Return total length copied
1920*5113495bSYour Name */
hif_ce_en_desc_hist(struct hif_softc * scn,const char * buf,size_t size)1921*5113495bSYour Name ssize_t hif_ce_en_desc_hist(struct hif_softc *scn, const char *buf, size_t size)
1922*5113495bSYour Name {
1923*5113495bSYour Name struct ce_desc_hist *ce_hist = NULL;
1924*5113495bSYour Name uint32_t cfg = 0;
1925*5113495bSYour Name uint32_t ce_id = 0;
1926*5113495bSYour Name
1927*5113495bSYour Name if (!scn)
1928*5113495bSYour Name return -EINVAL;
1929*5113495bSYour Name
1930*5113495bSYour Name ce_hist = &scn->hif_ce_desc_hist;
1931*5113495bSYour Name
1932*5113495bSYour Name if (!size) {
1933*5113495bSYour Name qdf_nofl_err("%s: Invalid input buffer.", __func__);
1934*5113495bSYour Name return -EINVAL;
1935*5113495bSYour Name }
1936*5113495bSYour Name
1937*5113495bSYour Name if (sscanf(buf, "%u %u", (unsigned int *)&ce_id,
1938*5113495bSYour Name (unsigned int *)&cfg) != 2) {
1939*5113495bSYour Name qdf_nofl_err("%s: Invalid input: Enter CE Id<sp><1/0>.",
1940*5113495bSYour Name __func__);
1941*5113495bSYour Name return -EINVAL;
1942*5113495bSYour Name }
1943*5113495bSYour Name if (ce_id >= CE_COUNT_MAX) {
1944*5113495bSYour Name qdf_print("Invalid value CE Id");
1945*5113495bSYour Name return -EINVAL;
1946*5113495bSYour Name }
1947*5113495bSYour Name
1948*5113495bSYour Name if ((cfg > 1 || cfg < 0)) {
1949*5113495bSYour Name qdf_print("Invalid values: enter 0 or 1");
1950*5113495bSYour Name return -EINVAL;
1951*5113495bSYour Name }
1952*5113495bSYour Name
1953*5113495bSYour Name if (!ce_hist->hist_ev[ce_id])
1954*5113495bSYour Name return -EINVAL;
1955*5113495bSYour Name
1956*5113495bSYour Name qdf_mutex_acquire(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1957*5113495bSYour Name if (cfg == 1) {
1958*5113495bSYour Name if (ce_hist->data_enable[ce_id] == 1) {
1959*5113495bSYour Name qdf_debug("Already Enabled");
1960*5113495bSYour Name } else {
1961*5113495bSYour Name if (alloc_mem_ce_debug_hist_data(scn, ce_id)
1962*5113495bSYour Name == QDF_STATUS_E_NOMEM){
1963*5113495bSYour Name ce_hist->data_enable[ce_id] = 0;
1964*5113495bSYour Name qdf_err("%s:Memory Alloc failed", __func__);
1965*5113495bSYour Name } else
1966*5113495bSYour Name ce_hist->data_enable[ce_id] = 1;
1967*5113495bSYour Name }
1968*5113495bSYour Name } else if (cfg == 0) {
1969*5113495bSYour Name if (ce_hist->data_enable[ce_id] == 0) {
1970*5113495bSYour Name qdf_debug("Already Disabled");
1971*5113495bSYour Name } else {
1972*5113495bSYour Name ce_hist->data_enable[ce_id] = 0;
1973*5113495bSYour Name free_mem_ce_debug_hist_data(scn, ce_id);
1974*5113495bSYour Name }
1975*5113495bSYour Name }
1976*5113495bSYour Name qdf_mutex_release(&ce_hist->ce_dbg_datamem_lock[ce_id]);
1977*5113495bSYour Name
1978*5113495bSYour Name return size;
1979*5113495bSYour Name }
1980*5113495bSYour Name
1981*5113495bSYour Name /*
1982*5113495bSYour Name * hif_disp_ce_enable_desc_data_hist() -
1983*5113495bSYour Name * API to display value of data_enable
1984*5113495bSYour Name *
1985*5113495bSYour Name * @dev: network device
1986*5113495bSYour Name * @attr: sysfs attribute
1987*5113495bSYour Name * @buf: buffer to copy the data.
1988*5113495bSYour Name *
1989*5113495bSYour Name * Return total length copied
1990*5113495bSYour Name */
hif_disp_ce_enable_desc_data_hist(struct hif_softc * scn,char * buf)1991*5113495bSYour Name ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf)
1992*5113495bSYour Name {
1993*5113495bSYour Name ssize_t len = 0;
1994*5113495bSYour Name uint32_t ce_id = 0;
1995*5113495bSYour Name struct ce_desc_hist *ce_hist = NULL;
1996*5113495bSYour Name
1997*5113495bSYour Name if (!scn)
1998*5113495bSYour Name return -EINVAL;
1999*5113495bSYour Name
2000*5113495bSYour Name ce_hist = &scn->hif_ce_desc_hist;
2001*5113495bSYour Name
2002*5113495bSYour Name for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
2003*5113495bSYour Name len += snprintf(buf + len, PAGE_SIZE - len, " CE%d: %d\n",
2004*5113495bSYour Name ce_id, ce_hist->data_enable[ce_id]);
2005*5113495bSYour Name }
2006*5113495bSYour Name
2007*5113495bSYour Name return len;
2008*5113495bSYour Name }
2009*5113495bSYour Name #endif /* HIF_CE_DEBUG_DATA_BUF */
2010*5113495bSYour Name
2011*5113495bSYour Name #ifdef OL_ATH_SMART_LOGGING
2012*5113495bSYour Name #define GUARD_SPACE 10
2013*5113495bSYour Name #define LOG_ID_SZ 4
2014*5113495bSYour Name /*
2015*5113495bSYour Name * hif_log_src_ce_dump() - Copy all the CE SRC ring to buf
2016*5113495bSYour Name * @src_ring: SRC ring state
2017*5113495bSYour Name * @buf_cur: Current pointer in ring buffer
2018*5113495bSYour Name * @buf_init:Start of the ring buffer
2019*5113495bSYour Name * @buf_sz: Size of the ring buffer
2020*5113495bSYour Name * @skb_sz: Max size of the SKB buffer to be copied
2021*5113495bSYour Name *
2022*5113495bSYour Name * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
2023*5113495bSYour Name * the given buf, skb_sz is the max buffer size to be copied
2024*5113495bSYour Name *
2025*5113495bSYour Name * Return: Current pointer in ring buffer
2026*5113495bSYour Name */
hif_log_src_ce_dump(struct CE_ring_state * src_ring,uint8_t * buf_cur,uint8_t * buf_init,uint32_t buf_sz,uint32_t skb_sz)2027*5113495bSYour Name static uint8_t *hif_log_src_ce_dump(struct CE_ring_state *src_ring,
2028*5113495bSYour Name uint8_t *buf_cur, uint8_t *buf_init,
2029*5113495bSYour Name uint32_t buf_sz, uint32_t skb_sz)
2030*5113495bSYour Name {
2031*5113495bSYour Name struct CE_src_desc *src_ring_base;
2032*5113495bSYour Name uint32_t len, entry;
2033*5113495bSYour Name struct CE_src_desc *src_desc;
2034*5113495bSYour Name qdf_nbuf_t nbuf;
2035*5113495bSYour Name uint32_t available_buf;
2036*5113495bSYour Name
2037*5113495bSYour Name src_ring_base = (struct CE_src_desc *)src_ring->base_addr_owner_space;
2038*5113495bSYour Name len = sizeof(struct CE_ring_state);
2039*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2040*5113495bSYour Name if (available_buf < (len + GUARD_SPACE)) {
2041*5113495bSYour Name buf_cur = buf_init;
2042*5113495bSYour Name }
2043*5113495bSYour Name
2044*5113495bSYour Name qdf_mem_copy(buf_cur, src_ring, sizeof(struct CE_ring_state));
2045*5113495bSYour Name buf_cur += sizeof(struct CE_ring_state);
2046*5113495bSYour Name
2047*5113495bSYour Name for (entry = 0; entry < src_ring->nentries; entry++) {
2048*5113495bSYour Name src_desc = CE_SRC_RING_TO_DESC(src_ring_base, entry);
2049*5113495bSYour Name nbuf = src_ring->per_transfer_context[entry];
2050*5113495bSYour Name if (nbuf) {
2051*5113495bSYour Name uint32_t skb_len = qdf_nbuf_len(nbuf);
2052*5113495bSYour Name uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2053*5113495bSYour Name
2054*5113495bSYour Name len = sizeof(struct CE_src_desc) + skb_cp_len
2055*5113495bSYour Name + LOG_ID_SZ + sizeof(skb_cp_len);
2056*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2057*5113495bSYour Name if (available_buf < (len + GUARD_SPACE)) {
2058*5113495bSYour Name buf_cur = buf_init;
2059*5113495bSYour Name }
2060*5113495bSYour Name qdf_mem_copy(buf_cur, src_desc,
2061*5113495bSYour Name sizeof(struct CE_src_desc));
2062*5113495bSYour Name buf_cur += sizeof(struct CE_src_desc);
2063*5113495bSYour Name
2064*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2065*5113495bSYour Name buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
2066*5113495bSYour Name skb_cp_len);
2067*5113495bSYour Name
2068*5113495bSYour Name if (skb_cp_len) {
2069*5113495bSYour Name qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2070*5113495bSYour Name skb_cp_len);
2071*5113495bSYour Name buf_cur += skb_cp_len;
2072*5113495bSYour Name }
2073*5113495bSYour Name } else {
2074*5113495bSYour Name len = sizeof(struct CE_src_desc) + LOG_ID_SZ;
2075*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2076*5113495bSYour Name if (available_buf < (len + GUARD_SPACE)) {
2077*5113495bSYour Name buf_cur = buf_init;
2078*5113495bSYour Name }
2079*5113495bSYour Name qdf_mem_copy(buf_cur, src_desc,
2080*5113495bSYour Name sizeof(struct CE_src_desc));
2081*5113495bSYour Name buf_cur += sizeof(struct CE_src_desc);
2082*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2083*5113495bSYour Name buf_cur += snprintf(buf_cur, available_buf, "NUL");
2084*5113495bSYour Name }
2085*5113495bSYour Name }
2086*5113495bSYour Name
2087*5113495bSYour Name return buf_cur;
2088*5113495bSYour Name }
2089*5113495bSYour Name
2090*5113495bSYour Name /*
2091*5113495bSYour Name * hif_log_dest_ce_dump() - Copy all the CE DEST ring to buf
2092*5113495bSYour Name * @dest_ring: SRC ring state
2093*5113495bSYour Name * @buf_cur: Current pointer in ring buffer
2094*5113495bSYour Name * @buf_init:Start of the ring buffer
2095*5113495bSYour Name * @buf_sz: Size of the ring buffer
2096*5113495bSYour Name * @skb_sz: Max size of the SKB buffer to be copied
2097*5113495bSYour Name *
2098*5113495bSYour Name * Dumps all the CE SRC ring descriptors and buffers pointed by them in to
2099*5113495bSYour Name * the given buf, skb_sz is the max buffer size to be copied
2100*5113495bSYour Name *
2101*5113495bSYour Name * Return: Current pointer in ring buffer
2102*5113495bSYour Name */
hif_log_dest_ce_dump(struct CE_ring_state * dest_ring,uint8_t * buf_cur,uint8_t * buf_init,uint32_t buf_sz,uint32_t skb_sz)2103*5113495bSYour Name static uint8_t *hif_log_dest_ce_dump(struct CE_ring_state *dest_ring,
2104*5113495bSYour Name uint8_t *buf_cur, uint8_t *buf_init,
2105*5113495bSYour Name uint32_t buf_sz, uint32_t skb_sz)
2106*5113495bSYour Name {
2107*5113495bSYour Name struct CE_dest_desc *dest_ring_base;
2108*5113495bSYour Name uint32_t len, entry;
2109*5113495bSYour Name struct CE_dest_desc *dest_desc;
2110*5113495bSYour Name qdf_nbuf_t nbuf;
2111*5113495bSYour Name uint32_t available_buf;
2112*5113495bSYour Name
2113*5113495bSYour Name dest_ring_base =
2114*5113495bSYour Name (struct CE_dest_desc *)dest_ring->base_addr_owner_space;
2115*5113495bSYour Name
2116*5113495bSYour Name len = sizeof(struct CE_ring_state);
2117*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2118*5113495bSYour Name if (available_buf < (len + GUARD_SPACE)) {
2119*5113495bSYour Name buf_cur = buf_init;
2120*5113495bSYour Name }
2121*5113495bSYour Name
2122*5113495bSYour Name qdf_mem_copy(buf_cur, dest_ring, sizeof(struct CE_ring_state));
2123*5113495bSYour Name buf_cur += sizeof(struct CE_ring_state);
2124*5113495bSYour Name
2125*5113495bSYour Name for (entry = 0; entry < dest_ring->nentries; entry++) {
2126*5113495bSYour Name dest_desc = CE_DEST_RING_TO_DESC(dest_ring_base, entry);
2127*5113495bSYour Name
2128*5113495bSYour Name nbuf = dest_ring->per_transfer_context[entry];
2129*5113495bSYour Name if (nbuf) {
2130*5113495bSYour Name uint32_t skb_len = qdf_nbuf_len(nbuf);
2131*5113495bSYour Name uint32_t skb_cp_len = qdf_min(skb_len, skb_sz);
2132*5113495bSYour Name
2133*5113495bSYour Name len = sizeof(struct CE_dest_desc) + skb_cp_len
2134*5113495bSYour Name + LOG_ID_SZ + sizeof(skb_cp_len);
2135*5113495bSYour Name
2136*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2137*5113495bSYour Name if (available_buf < (len + GUARD_SPACE)) {
2138*5113495bSYour Name buf_cur = buf_init;
2139*5113495bSYour Name }
2140*5113495bSYour Name
2141*5113495bSYour Name qdf_mem_copy(buf_cur, dest_desc,
2142*5113495bSYour Name sizeof(struct CE_dest_desc));
2143*5113495bSYour Name buf_cur += sizeof(struct CE_dest_desc);
2144*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2145*5113495bSYour Name buf_cur += snprintf(buf_cur, available_buf, "SKB%d",
2146*5113495bSYour Name skb_cp_len);
2147*5113495bSYour Name if (skb_cp_len) {
2148*5113495bSYour Name qdf_mem_copy(buf_cur, qdf_nbuf_data(nbuf),
2149*5113495bSYour Name skb_cp_len);
2150*5113495bSYour Name buf_cur += skb_cp_len;
2151*5113495bSYour Name }
2152*5113495bSYour Name } else {
2153*5113495bSYour Name len = sizeof(struct CE_dest_desc) + LOG_ID_SZ;
2154*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2155*5113495bSYour Name if (available_buf < (len + GUARD_SPACE)) {
2156*5113495bSYour Name buf_cur = buf_init;
2157*5113495bSYour Name }
2158*5113495bSYour Name qdf_mem_copy(buf_cur, dest_desc,
2159*5113495bSYour Name sizeof(struct CE_dest_desc));
2160*5113495bSYour Name buf_cur += sizeof(struct CE_dest_desc);
2161*5113495bSYour Name available_buf = buf_sz - (buf_cur - buf_init);
2162*5113495bSYour Name buf_cur += snprintf(buf_cur, available_buf, "NUL");
2163*5113495bSYour Name }
2164*5113495bSYour Name }
2165*5113495bSYour Name return buf_cur;
2166*5113495bSYour Name }
2167*5113495bSYour Name
2168*5113495bSYour Name /**
2169*5113495bSYour Name * hif_log_dump_ce() - Copy all the CE DEST ring to buf
2170*5113495bSYour Name * @scn:
2171*5113495bSYour Name * @buf_cur:
2172*5113495bSYour Name * @buf_init:
2173*5113495bSYour Name * @buf_sz:
2174*5113495bSYour Name * @ce:
2175*5113495bSYour Name * @skb_sz:
2176*5113495bSYour Name *
2177*5113495bSYour Name * Calls the respective function to dump all the CE SRC/DEST ring descriptors
2178*5113495bSYour Name * and buffers pointed by them in to the given buf
2179*5113495bSYour Name */
hif_log_dump_ce(struct hif_softc * scn,uint8_t * buf_cur,uint8_t * buf_init,uint32_t buf_sz,uint32_t ce,uint32_t skb_sz)2180*5113495bSYour Name uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2181*5113495bSYour Name uint8_t *buf_init, uint32_t buf_sz,
2182*5113495bSYour Name uint32_t ce, uint32_t skb_sz)
2183*5113495bSYour Name {
2184*5113495bSYour Name struct CE_state *ce_state;
2185*5113495bSYour Name struct CE_ring_state *src_ring;
2186*5113495bSYour Name struct CE_ring_state *dest_ring;
2187*5113495bSYour Name
2188*5113495bSYour Name ce_state = scn->ce_id_to_state[ce];
2189*5113495bSYour Name src_ring = ce_state->src_ring;
2190*5113495bSYour Name dest_ring = ce_state->dest_ring;
2191*5113495bSYour Name
2192*5113495bSYour Name if (src_ring) {
2193*5113495bSYour Name buf_cur = hif_log_src_ce_dump(src_ring, buf_cur,
2194*5113495bSYour Name buf_init, buf_sz, skb_sz);
2195*5113495bSYour Name } else if (dest_ring) {
2196*5113495bSYour Name buf_cur = hif_log_dest_ce_dump(dest_ring, buf_cur,
2197*5113495bSYour Name buf_init, buf_sz, skb_sz);
2198*5113495bSYour Name }
2199*5113495bSYour Name
2200*5113495bSYour Name return buf_cur;
2201*5113495bSYour Name }
2202*5113495bSYour Name
2203*5113495bSYour Name qdf_export_symbol(hif_log_dump_ce);
2204*5113495bSYour Name #endif /* OL_ATH_SMART_LOGGING */
2205*5113495bSYour Name
2206