xref: /wlan-driver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name #include <hif_exec.h>
21*5113495bSYour Name #include <ce_main.h>
22*5113495bSYour Name #include "qdf_module.h"
23*5113495bSYour Name #include "qdf_net_if.h"
24*5113495bSYour Name #include <pld_common.h>
25*5113495bSYour Name #ifdef DP_UMAC_HW_RESET_SUPPORT
26*5113495bSYour Name #include "if_pci.h"
27*5113495bSYour Name #endif
28*5113495bSYour Name #include "qdf_ssr_driver_dump.h"
29*5113495bSYour Name 
30*5113495bSYour Name /* mapping NAPI budget 0 to internal budget 0
31*5113495bSYour Name  * NAPI budget 1 to internal budget [1,scaler -1]
32*5113495bSYour Name  * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc
33*5113495bSYour Name  */
34*5113495bSYour Name #define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
35*5113495bSYour Name 	(((n) << (s)) - 1)
36*5113495bSYour Name #define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
37*5113495bSYour Name 	(((n) + 1) >> (s))
38*5113495bSYour Name 
39*5113495bSYour Name static struct hif_exec_context *hif_exec_tasklet_create(void);
40*5113495bSYour Name 
41*5113495bSYour Name #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
42*5113495bSYour Name struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];
43*5113495bSYour Name uint32_t hif_event_hist_max = HIF_EVENT_HIST_MAX;
44*5113495bSYour Name 
/* Register the HIF event history buffers with the SSR driver-dump
 * module so their contents are captured in subsystem-restart dumps.
 */
void hif_desc_history_log_register(void)
{
	qdf_ssr_driver_dump_register_region("hif_event_history",
					    hif_event_desc_history,
					    sizeof(hif_event_desc_history));
	qdf_ssr_driver_dump_register_region("hif_event_hist_max",
					    &hif_event_hist_max,
					    sizeof(hif_event_hist_max));
}
54*5113495bSYour Name 
/* Unregister the HIF event history dump regions, in reverse order of
 * registration.
 */
void hif_desc_history_log_unregister(void)
{
	qdf_ssr_driver_dump_unregister_region("hif_event_hist_max");
	qdf_ssr_driver_dump_unregister_region("hif_event_history");
}
60*5113495bSYour Name 
61*5113495bSYour Name static inline
hif_get_next_record_index(qdf_atomic_t * table_index,int array_size)62*5113495bSYour Name int hif_get_next_record_index(qdf_atomic_t *table_index,
63*5113495bSYour Name 			      int array_size)
64*5113495bSYour Name {
65*5113495bSYour Name 	int record_index = qdf_atomic_inc_return(table_index);
66*5113495bSYour Name 
67*5113495bSYour Name 	return record_index & (array_size - 1);
68*5113495bSYour Name }
69*5113495bSYour Name 
70*5113495bSYour Name /**
71*5113495bSYour Name  * hif_hist_is_prev_record() - Check if index is the immediate
72*5113495bSYour Name  *  previous record wrt curr_index
73*5113495bSYour Name  * @curr_index: curr index in the event history
74*5113495bSYour Name  * @index: index to be checked
75*5113495bSYour Name  * @hist_size: history size
76*5113495bSYour Name  *
77*5113495bSYour Name  * Return: true if index is immediately behind curr_index else false
78*5113495bSYour Name  */
/**
 * hif_hist_is_prev_record() - Check if @index is the entry immediately
 *  before @curr_index in a circular history of @hist_size entries
 * @curr_index: current index in the event history
 * @index: index to be checked
 * @hist_size: history size (must be a power of two)
 *
 * Return: true if @index immediately precedes @curr_index, else false
 */
static inline
bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
			     uint32_t hist_size)
{
	int32_t next_index = (index + 1) & (hist_size - 1);

	return next_index == curr_index;
}
86*5113495bSYour Name 
87*5113495bSYour Name /**
88*5113495bSYour Name  * hif_hist_skip_event_record() - Check if current event needs to be
89*5113495bSYour Name  *  recorded or not
90*5113495bSYour Name  * @hist_ev: HIF event history
91*5113495bSYour Name  * @event: DP event entry
92*5113495bSYour Name  *
93*5113495bSYour Name  * Return: true if current event needs to be skipped else false
94*5113495bSYour Name  */
static bool
hif_hist_skip_event_record(struct hif_event_history *hist_ev,
			   struct hif_event_record *event)
{
	struct hif_event_record *rec;
	struct hif_event_record *last_irq_rec;
	int32_t index;

	/* index is -1 until the first event is recorded: nothing to merge */
	index = qdf_atomic_read(&hist_ev->index);
	if (index < 0)
		return false;

	/* wrap the free-running counter; HIF_EVENT_HIST_MAX is a power of 2 */
	index &= (HIF_EVENT_HIST_MAX - 1);
	rec = &hist_ev->event[index];

	switch (event->type) {
	case HIF_EVENT_IRQ_TRIGGER:
		/*
		 * The prev record check is to prevent skipping the IRQ event
		 * record in case where BH got re-scheduled due to force_break
		 * but there are no entries to be reaped in the rings.
		 */
		if (rec->type == HIF_EVENT_BH_SCHED &&
		    hif_hist_is_prev_record(index,
					    hist_ev->misc.last_irq_index,
					    HIF_EVENT_HIST_MAX)) {
			/* fold this IRQ into the previous IRQ record:
			 * hp counts merged IRQs and tp holds the delta from
			 * the previous IRQ timestamp for this entry
			 */
			last_irq_rec =
				&hist_ev->event[hist_ev->misc.last_irq_index];
			last_irq_rec->timestamp = hif_get_log_timestamp();
			last_irq_rec->cpu_id = qdf_get_cpu();
			last_irq_rec->hp++;
			last_irq_rec->tp = last_irq_rec->timestamp -
						hist_ev->misc.last_irq_ts;
			return true;
		}
		break;
	case HIF_EVENT_BH_SCHED:
		/* merge back-to-back BH_SCHED events into a single entry,
		 * refreshing its timestamp and CPU
		 */
		if (rec->type == HIF_EVENT_BH_SCHED) {
			rec->timestamp = hif_get_log_timestamp();
			rec->cpu_id = qdf_get_cpu();
			return true;
		}
		break;
	case HIF_EVENT_SRNG_ACCESS_START:
		/* head == tail: ring had nothing to reap, skip the record */
		if (event->hp == event->tp)
			return true;
		break;
	case HIF_EVENT_SRNG_ACCESS_END:
		/* only record an END that pairs with the preceding START */
		if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
			return true;
		break;
	case HIF_EVENT_BH_COMPLETE:
	case HIF_EVENT_BH_FORCE_BREAK:
		/* only meaningful immediately after an SRNG access end */
		if (rec->type != HIF_EVENT_SRNG_ACCESS_END)
			return true;
		break;
	default:
		break;
	}

	return false;
}
157*5113495bSYour Name 
/**
 * hif_hist_record_event() - record one event into a group's history buffer
 * @hif_ctx: HIF opaque context
 * @event: event to record (type, ring id, hp/tp snapshot)
 * @intr_grp_id: interrupt group whose history buffer is written
 *
 * Return: None
 */
void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
			   struct hif_event_record *event, uint8_t intr_grp_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_event_history *hist_ev;
	struct hif_event_record *record;
	int record_index;

	/* recording of this event type may be disabled via the mask */
	if (!(scn->event_enable_mask & BIT(event->type)))
		return;

	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
		hif_err("Invalid interrupt group id %d", intr_grp_id);
		return;
	}

	/* history may not be attached for this group (see
	 * hif_event_history_init()/deinit())
	 */
	hist_ev = scn->evt_hist[intr_grp_id];
	if (qdf_unlikely(!hist_ev))
		return;

	/* coalesce/suppress redundant events before consuming a slot */
	if (hif_hist_skip_event_record(hist_ev, event))
		return;

	record_index = hif_get_next_record_index(
			&hist_ev->index, HIF_EVENT_HIST_MAX);

	record = &hist_ev->event[record_index];

	/* remember where the latest IRQ landed so subsequent events can be
	 * folded back into it by hif_hist_skip_event_record()
	 */
	if (event->type == HIF_EVENT_IRQ_TRIGGER) {
		hist_ev->misc.last_irq_index = record_index;
		hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
	}

	record->hal_ring_id = event->hal_ring_id;
	record->hp = event->hp;
	record->tp = event->tp;
	record->cpu_id = qdf_get_cpu();
	record->timestamp = hif_get_log_timestamp();
	record->type = event->type;
}
198*5113495bSYour Name 
/* Attach the static per-group history buffer to the softc and reset its
 * write index; -1 marks an empty history.
 */
void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct hif_event_history *hist = &hif_event_desc_history[id];

	qdf_atomic_set(&hist->index, -1);
	scn->evt_hist[id] = hist;

	hif_info("SRNG events history initialized for group: %d", id);
}
208*5113495bSYour Name 
/* Detach the per-group event history buffer; recording for this group
 * stops once the pointer is cleared (see hif_hist_record_event()).
 */
void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->evt_hist[id] = NULL;
	hif_info("SRNG events history de-initialized for group: %d", id);
}
216*5113495bSYour Name #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
217*5113495bSYour Name 
218*5113495bSYour Name #ifndef QCA_WIFI_WCN6450
219*5113495bSYour Name /**
220*5113495bSYour Name  * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
221*5113495bSYour Name  * @hif_state: hif context
222*5113495bSYour Name  *
223*5113495bSYour Name  * return: void
224*5113495bSYour Name  */
225*5113495bSYour Name #ifdef HIF_LATENCY_PROFILE_ENABLE
hif_print_napi_latency_stats(struct HIF_CE_state * hif_state)226*5113495bSYour Name static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
227*5113495bSYour Name {
228*5113495bSYour Name 	struct hif_exec_context *hif_ext_group;
229*5113495bSYour Name 	int i, j;
230*5113495bSYour Name 	int64_t cur_tstamp;
231*5113495bSYour Name 
232*5113495bSYour Name 	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] =  {
233*5113495bSYour Name 		"0-2   ms",
234*5113495bSYour Name 		"3-10  ms",
235*5113495bSYour Name 		"11-20 ms",
236*5113495bSYour Name 		"21-50 ms",
237*5113495bSYour Name 		"51-100 ms",
238*5113495bSYour Name 		"101-250 ms",
239*5113495bSYour Name 		"251-500 ms",
240*5113495bSYour Name 		"> 500 ms"
241*5113495bSYour Name 	};
242*5113495bSYour Name 
243*5113495bSYour Name 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
244*5113495bSYour Name 
245*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
246*5113495bSYour Name 		  "Current timestamp: %lld", cur_tstamp);
247*5113495bSYour Name 
248*5113495bSYour Name 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
249*5113495bSYour Name 		if (hif_state->hif_ext_group[i]) {
250*5113495bSYour Name 			hif_ext_group = hif_state->hif_ext_group[i];
251*5113495bSYour Name 
252*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
253*5113495bSYour Name 				  "ext grp %d Last serviced timestamp: %lld",
254*5113495bSYour Name 				  i, hif_ext_group->tstamp);
255*5113495bSYour Name 
256*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
257*5113495bSYour Name 				  "Latency Bucket     | Time elapsed");
258*5113495bSYour Name 
259*5113495bSYour Name 			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
260*5113495bSYour Name 				if (hif_ext_group->sched_latency_stats[j])
261*5113495bSYour Name 					QDF_TRACE(QDF_MODULE_ID_HIF,
262*5113495bSYour Name 						  QDF_TRACE_LEVEL_INFO_HIGH,
263*5113495bSYour Name 						  "%s     |    %lld",
264*5113495bSYour Name 						  time_str[j],
265*5113495bSYour Name 						  hif_ext_group->
266*5113495bSYour Name 						  sched_latency_stats[j]);
267*5113495bSYour Name 			}
268*5113495bSYour Name 		}
269*5113495bSYour Name 	}
270*5113495bSYour Name }
271*5113495bSYour Name #else
/* stub: scheduling-latency profiling requires HIF_LATENCY_PROFILE_ENABLE */
static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
{
}
275*5113495bSYour Name #endif
276*5113495bSYour Name 
277*5113495bSYour Name /**
278*5113495bSYour Name  * hif_clear_napi_stats() - reset NAPI stats
279*5113495bSYour Name  * @hif_ctx: hif context
280*5113495bSYour Name  *
281*5113495bSYour Name  * return: void
282*5113495bSYour Name  */
hif_clear_napi_stats(struct hif_opaque_softc * hif_ctx)283*5113495bSYour Name void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
284*5113495bSYour Name {
285*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
286*5113495bSYour Name 	struct hif_exec_context *hif_ext_group;
287*5113495bSYour Name 	size_t i;
288*5113495bSYour Name 
289*5113495bSYour Name 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
290*5113495bSYour Name 		hif_ext_group = hif_state->hif_ext_group[i];
291*5113495bSYour Name 
292*5113495bSYour Name 		if (!hif_ext_group)
293*5113495bSYour Name 			return;
294*5113495bSYour Name 
295*5113495bSYour Name 		qdf_mem_set(hif_ext_group->sched_latency_stats,
296*5113495bSYour Name 			    sizeof(hif_ext_group->sched_latency_stats),
297*5113495bSYour Name 			    0x0);
298*5113495bSYour Name 	}
299*5113495bSYour Name }
300*5113495bSYour Name 
301*5113495bSYour Name qdf_export_symbol(hif_clear_napi_stats);
302*5113495bSYour Name 
303*5113495bSYour Name #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
304*5113495bSYour Name /**
305*5113495bSYour Name  * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
306*5113495bSYour Name  * @stats: NAPI stats to get poll time buckets
307*5113495bSYour Name  * @buf: buffer to fill histogram string
308*5113495bSYour Name  * @buf_len: length of the buffer
309*5113495bSYour Name  *
310*5113495bSYour Name  * Return: void
311*5113495bSYour Name  */
/**
 * hif_get_poll_times_hist_str() - render poll time buckets as "a|b|c|..."
 * @stats: NAPI stats holding the poll time buckets
 * @buf: output buffer for the histogram string
 * @buf_len: size of @buf
 *
 * Return: void
 */
static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
					uint8_t buf_len)
{
	int bkt;
	int pos = 0;

	for (bkt = 0; bkt < QCA_NAPI_NUM_BUCKETS; bkt++)
		pos += qdf_scnprintf(buf + pos, buf_len - pos, "%u|",
				     stats->poll_time_buckets[bkt]);
}
322*5113495bSYour Name 
hif_print_napi_stats(struct hif_opaque_softc * hif_ctx)323*5113495bSYour Name void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
324*5113495bSYour Name {
325*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
326*5113495bSYour Name 	struct hif_exec_context *hif_ext_group;
327*5113495bSYour Name 	struct qca_napi_stat *napi_stats;
328*5113495bSYour Name 	int i, j;
329*5113495bSYour Name 
330*5113495bSYour Name 	/*
331*5113495bSYour Name 	 * Max value of uint_32 (poll_time_bucket) = 4294967295
332*5113495bSYour Name 	 * Thus we need 10 chars + 1 space =11 chars for each bucket value.
333*5113495bSYour Name 	 * +1 space for '\0'.
334*5113495bSYour Name 	 */
335*5113495bSYour Name 	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
336*5113495bSYour Name 
337*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
338*5113495bSYour Name 		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");
339*5113495bSYour Name 
340*5113495bSYour Name 	for (i = 0;
341*5113495bSYour Name 	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
342*5113495bSYour Name 	     i++) {
343*5113495bSYour Name 		hif_ext_group = hif_state->hif_ext_group[i];
344*5113495bSYour Name 		for (j = 0; j < num_possible_cpus(); j++) {
345*5113495bSYour Name 			napi_stats = &hif_ext_group->stats[j];
346*5113495bSYour Name 			if (!napi_stats->napi_schedules)
347*5113495bSYour Name 				continue;
348*5113495bSYour Name 
349*5113495bSYour Name 			hif_get_poll_times_hist_str(napi_stats,
350*5113495bSYour Name 						    hist_str,
351*5113495bSYour Name 						    sizeof(hist_str));
352*5113495bSYour Name 			QDF_TRACE(QDF_MODULE_ID_HIF,
353*5113495bSYour Name 				  QDF_TRACE_LEVEL_INFO_HIGH,
354*5113495bSYour Name 				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
355*5113495bSYour Name 				  i, j,
356*5113495bSYour Name 				  napi_stats->napi_schedules,
357*5113495bSYour Name 				  napi_stats->napi_polls,
358*5113495bSYour Name 				  napi_stats->napi_completes,
359*5113495bSYour Name 				  napi_stats->napi_workdone,
360*5113495bSYour Name 				  napi_stats->time_limit_reached,
361*5113495bSYour Name 				  qdf_do_div(napi_stats->napi_max_poll_time,
362*5113495bSYour Name 					     1000),
363*5113495bSYour Name 				  hist_str);
364*5113495bSYour Name 		}
365*5113495bSYour Name 	}
366*5113495bSYour Name 
367*5113495bSYour Name 	hif_print_napi_latency_stats(hif_state);
368*5113495bSYour Name }
369*5113495bSYour Name 
370*5113495bSYour Name qdf_export_symbol(hif_print_napi_stats);
371*5113495bSYour Name #else
/* stub: poll-time histograms only exist when
 * WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT is enabled
 */
static inline
void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
				 uint8_t buf_len)
{
}
377*5113495bSYour Name 
hif_print_napi_stats(struct hif_opaque_softc * hif_ctx)378*5113495bSYour Name void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
379*5113495bSYour Name {
380*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
381*5113495bSYour Name 	struct hif_exec_context *hif_ext_group;
382*5113495bSYour Name 	struct qca_napi_stat *napi_stats;
383*5113495bSYour Name 	int i, j;
384*5113495bSYour Name 
385*5113495bSYour Name 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
386*5113495bSYour Name 		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
387*5113495bSYour Name 
388*5113495bSYour Name 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
389*5113495bSYour Name 		if (hif_state->hif_ext_group[i]) {
390*5113495bSYour Name 			hif_ext_group = hif_state->hif_ext_group[i];
391*5113495bSYour Name 			for (j = 0; j < num_possible_cpus(); j++) {
392*5113495bSYour Name 				napi_stats = &(hif_ext_group->stats[j]);
393*5113495bSYour Name 				if (napi_stats->napi_schedules != 0)
394*5113495bSYour Name 					QDF_TRACE(QDF_MODULE_ID_HIF,
395*5113495bSYour Name 						QDF_TRACE_LEVEL_FATAL,
396*5113495bSYour Name 						"NAPI[%2d]CPU[%d]: "
397*5113495bSYour Name 						"%7d %7d %7d %7d ",
398*5113495bSYour Name 						i, j,
399*5113495bSYour Name 						napi_stats->napi_schedules,
400*5113495bSYour Name 						napi_stats->napi_polls,
401*5113495bSYour Name 						napi_stats->napi_completes,
402*5113495bSYour Name 						napi_stats->napi_workdone);
403*5113495bSYour Name 			}
404*5113495bSYour Name 		}
405*5113495bSYour Name 	}
406*5113495bSYour Name 
407*5113495bSYour Name 	hif_print_napi_latency_stats(hif_state);
408*5113495bSYour Name }
409*5113495bSYour Name qdf_export_symbol(hif_print_napi_stats);
410*5113495bSYour Name #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
411*5113495bSYour Name #endif /* QCA_WIFI_WCN6450 */
412*5113495bSYour Name 
413*5113495bSYour Name #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
414*5113495bSYour Name /**
415*5113495bSYour Name  * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
416*5113495bSYour Name  * @hif_ext_group: hif_ext_group of type NAPI
417*5113495bSYour Name  *
418*5113495bSYour Name  * The function is called at the end of a NAPI poll to calculate poll time
419*5113495bSYour Name  * buckets.
420*5113495bSYour Name  *
421*5113495bSYour Name  * Return: void
422*5113495bSYour Name  */
423*5113495bSYour Name static
hif_exec_fill_poll_time_histogram(struct hif_exec_context * hif_ext_group)424*5113495bSYour Name void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
425*5113495bSYour Name {
426*5113495bSYour Name 	struct qca_napi_stat *napi_stat;
427*5113495bSYour Name 	unsigned long long poll_time_ns;
428*5113495bSYour Name 	uint32_t poll_time_us;
429*5113495bSYour Name 	uint32_t bucket_size_us = 500;
430*5113495bSYour Name 	uint32_t bucket;
431*5113495bSYour Name 	uint32_t cpu_id = qdf_get_cpu();
432*5113495bSYour Name 
433*5113495bSYour Name 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
434*5113495bSYour Name 	poll_time_us = qdf_do_div(poll_time_ns, 1000);
435*5113495bSYour Name 
436*5113495bSYour Name 	napi_stat = &hif_ext_group->stats[cpu_id];
437*5113495bSYour Name 	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
438*5113495bSYour Name 		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;
439*5113495bSYour Name 
440*5113495bSYour Name 	bucket = poll_time_us / bucket_size_us;
441*5113495bSYour Name 	if (bucket >= QCA_NAPI_NUM_BUCKETS)
442*5113495bSYour Name 		bucket = QCA_NAPI_NUM_BUCKETS - 1;
443*5113495bSYour Name 	++napi_stat->poll_time_buckets[bucket];
444*5113495bSYour Name }
445*5113495bSYour Name 
446*5113495bSYour Name /**
447*5113495bSYour Name  * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
448*5113495bSYour Name  * @hif_ext_group: hif_ext_group of type NAPI
449*5113495bSYour Name  *
450*5113495bSYour Name  * Return: true if NAPI needs to yield, else false
451*5113495bSYour Name  */
hif_exec_poll_should_yield(struct hif_exec_context * hif_ext_group)452*5113495bSYour Name static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
453*5113495bSYour Name {
454*5113495bSYour Name 	bool time_limit_reached = false;
455*5113495bSYour Name 	unsigned long long poll_time_ns;
456*5113495bSYour Name 	int cpu_id = qdf_get_cpu();
457*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
458*5113495bSYour Name 	struct hif_config_info *cfg = &scn->hif_config;
459*5113495bSYour Name 
460*5113495bSYour Name 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
461*5113495bSYour Name 	time_limit_reached =
462*5113495bSYour Name 		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;
463*5113495bSYour Name 
464*5113495bSYour Name 	if (time_limit_reached) {
465*5113495bSYour Name 		hif_ext_group->stats[cpu_id].time_limit_reached++;
466*5113495bSYour Name 		hif_ext_group->force_break = true;
467*5113495bSYour Name 	}
468*5113495bSYour Name 
469*5113495bSYour Name 	return time_limit_reached;
470*5113495bSYour Name }
471*5113495bSYour Name 
hif_exec_should_yield(struct hif_opaque_softc * hif_ctx,uint grp_id)472*5113495bSYour Name bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
473*5113495bSYour Name {
474*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
475*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
476*5113495bSYour Name 	struct hif_exec_context *hif_ext_group;
477*5113495bSYour Name 	bool ret_val = false;
478*5113495bSYour Name 
479*5113495bSYour Name 	if (!(grp_id < hif_state->hif_num_extgroup) ||
480*5113495bSYour Name 	    !(grp_id < HIF_MAX_GROUP))
481*5113495bSYour Name 		return false;
482*5113495bSYour Name 
483*5113495bSYour Name 	hif_ext_group = hif_state->hif_ext_group[grp_id];
484*5113495bSYour Name 
485*5113495bSYour Name 	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
486*5113495bSYour Name 		ret_val = hif_exec_poll_should_yield(hif_ext_group);
487*5113495bSYour Name 
488*5113495bSYour Name 	return ret_val;
489*5113495bSYour Name }
490*5113495bSYour Name 
491*5113495bSYour Name /**
492*5113495bSYour Name  * hif_exec_update_service_start_time() - Update NAPI poll start time
493*5113495bSYour Name  * @hif_ext_group: hif_ext_group of type NAPI
494*5113495bSYour Name  *
495*5113495bSYour Name  * The function is called at the beginning of a NAPI poll to record the poll
496*5113495bSYour Name  * start time.
497*5113495bSYour Name  *
498*5113495bSYour Name  * Return: None
499*5113495bSYour Name  */
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
	/* snapshot consumed by hif_exec_poll_should_yield() and
	 * hif_exec_fill_poll_time_histogram() to compute poll duration
	 */
	hif_ext_group->poll_start_time = qdf_time_sched_clock();
}
505*5113495bSYour Name 
506*5113495bSYour Name #else
/* stub: poll start-time tracking only exists when
 * WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT is enabled
 */
static inline
void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
{
}
511*5113495bSYour Name 
/* stub: poll-time histograms only exist when
 * WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT is enabled
 */
static inline
void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
{
}
516*5113495bSYour Name #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
517*5113495bSYour Name 
/**
 * hif_exec_tasklet_schedule() - schedule the group's tasklet to run
 * @ctx: hif execution context of tasklet type
 *
 * Return: void
 */
static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);

	tasklet_schedule(&t_ctx->tasklet);
}
524*5113495bSYour Name 
525*5113495bSYour Name /**
526*5113495bSYour Name  * hif_exec_tasklet_fn() - grp tasklet
527*5113495bSYour Name  * @data: context
528*5113495bSYour Name  *
529*5113495bSYour Name  * Return: void
530*5113495bSYour Name  */
static void hif_exec_tasklet_fn(unsigned long data)
{
	struct hif_exec_context *hif_ext_group =
			(struct hif_exec_context *)data;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	unsigned int work_done;
	int cpu = smp_processor_id();

	/* run the group handler with the maximum budget */
	work_done =
		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET,
				       cpu);

	/* on completion, drop the active tasklet count before re-enabling
	 * the group's IRQs; otherwise re-arm the tasklet to keep processing
	 */
	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
		hif_ext_group->irq_enable(hif_ext_group);
	} else {
		hif_exec_tasklet_schedule(hif_ext_group);
	}
}
550*5113495bSYour Name 
551*5113495bSYour Name /**
552*5113495bSYour Name  * hif_latency_profile_measure() - calculate latency and update histogram
553*5113495bSYour Name  * @hif_ext_group: hif exec context
554*5113495bSYour Name  *
555*5113495bSYour Name  * Return: None
556*5113495bSYour Name  */
557*5113495bSYour Name #ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
	int64_t cur_tstamp;
	int64_t time_elapsed;

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	/* elapsed ms since hif_latency_profile_start() stamped tstamp */
	if (cur_tstamp > hif_ext_group->tstamp)
		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
	else
		/* NOTE(review): wraparound compensation - '~0x0' evaluates
		 * to the int value -1, not a 64-bit all-ones mask, so this
		 * branch yields a negative elapsed time that lands in
		 * bucket 0. Confirm whether ~0x0ULL was intended.
		 */
		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);

	hif_ext_group->tstamp = cur_tstamp;

	/* bucket boundaries are defined by the HIF_SCHED_LATENCY_BUCKET_*
	 * thresholds; the final bucket catches everything above 500 ms
	 */
	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
		hif_ext_group->sched_latency_stats[0]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
		hif_ext_group->sched_latency_stats[1]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
		hif_ext_group->sched_latency_stats[2]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
		hif_ext_group->sched_latency_stats[3]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
		hif_ext_group->sched_latency_stats[4]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
		hif_ext_group->sched_latency_stats[5]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
		hif_ext_group->sched_latency_stats[6]++;
	else
		hif_ext_group->sched_latency_stats[7]++;
}
589*5113495bSYour Name #else
/* stub: latency profiling requires HIF_LATENCY_PROFILE_ENABLE */
static inline
void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
{
}
594*5113495bSYour Name #endif
595*5113495bSYour Name 
596*5113495bSYour Name /**
597*5113495bSYour Name  * hif_latency_profile_start() - Update the start timestamp for HIF ext group
598*5113495bSYour Name  * @hif_ext_group: hif exec context
599*5113495bSYour Name  *
600*5113495bSYour Name  * Return: None
601*5113495bSYour Name  */
602*5113495bSYour Name #ifdef HIF_LATENCY_PROFILE_ENABLE
static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
{
	/* Snapshot the schedule time in ms; hif_latency_profile_measure()
	 * diffs against this to bin bottom-half scheduling latency.
	 */
	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
}
607*5113495bSYour Name #else
608*5113495bSYour Name static inline
hif_latency_profile_start(struct hif_exec_context * hif_ext_group)609*5113495bSYour Name void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
610*5113495bSYour Name {
611*5113495bSYour Name }
612*5113495bSYour Name #endif
613*5113495bSYour Name 
614*5113495bSYour Name #ifdef FEATURE_NAPI
615*5113495bSYour Name #ifdef FEATURE_IRQ_AFFINITY
616*5113495bSYour Name static inline int32_t
hif_is_force_napi_complete_required(struct hif_exec_context * hif_ext_group)617*5113495bSYour Name hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
618*5113495bSYour Name {
619*5113495bSYour Name 	return qdf_atomic_inc_not_zero(&hif_ext_group->force_napi_complete);
620*5113495bSYour Name }
621*5113495bSYour Name #else
622*5113495bSYour Name static inline int32_t
hif_is_force_napi_complete_required(struct hif_exec_context * hif_ext_group)623*5113495bSYour Name hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
624*5113495bSYour Name {
625*5113495bSYour Name 	return 0;
626*5113495bSYour Name }
627*5113495bSYour Name #endif
628*5113495bSYour Name 
629*5113495bSYour Name /**
630*5113495bSYour Name  * hif_irq_disabled_time_limit_reached() - determine if irq disabled limit
631*5113495bSYour Name  * reached for single MSI
632*5113495bSYour Name  * @hif_ext_group: hif exec context
633*5113495bSYour Name  *
634*5113495bSYour Name  * Return: true if reached, else false.
635*5113495bSYour Name  */
636*5113495bSYour Name static bool
hif_irq_disabled_time_limit_reached(struct hif_exec_context * hif_ext_group)637*5113495bSYour Name hif_irq_disabled_time_limit_reached(struct hif_exec_context *hif_ext_group)
638*5113495bSYour Name {
639*5113495bSYour Name 	unsigned long long irq_disabled_duration_ns;
640*5113495bSYour Name 
641*5113495bSYour Name 	if (hif_ext_group->type != HIF_EXEC_NAPI_TYPE)
642*5113495bSYour Name 		return false;
643*5113495bSYour Name 
644*5113495bSYour Name 	irq_disabled_duration_ns = qdf_time_sched_clock() -
645*5113495bSYour Name 					hif_ext_group->irq_disabled_start_time;
646*5113495bSYour Name 	if (irq_disabled_duration_ns >= IRQ_DISABLED_MAX_DURATION_NS) {
647*5113495bSYour Name 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
648*5113495bSYour Name 				 0, 0, 0, HIF_EVENT_IRQ_DISABLE_EXPIRED);
649*5113495bSYour Name 		return true;
650*5113495bSYour Name 	}
651*5113495bSYour Name 
652*5113495bSYour Name 	return false;
653*5113495bSYour Name }
654*5113495bSYour Name 
655*5113495bSYour Name /**
656*5113495bSYour Name  * hif_exec_poll() - napi poll
657*5113495bSYour Name  * @napi: napi struct
658*5113495bSYour Name  * @budget: budget for napi
659*5113495bSYour Name  *
660*5113495bSYour Name  * Return: mapping of internal budget to napi
661*5113495bSYour Name  */
static int hif_exec_poll(struct napi_struct *napi, int budget)
{
	struct hif_napi_exec_context *napi_exec_ctx =
		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
	int work_done;
	int normalized_budget = 0;
	int actual_dones;
	/* binary shift factor between NAPI and internal budget */
	int shift = hif_ext_group->scale_bin_shift;
	int cpu = smp_processor_id();
	bool force_complete = false;

	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
			 0, 0, 0, HIF_EVENT_BH_SCHED);

	hif_ext_group->force_break = false;
	hif_exec_update_service_start_time(hif_ext_group);

	/* Scale the (smaller) NAPI budget up to the internal budget;
	 * a zero NAPI budget maps to a zero internal budget.
	 */
	if (budget)
		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);

	hif_latency_profile_measure(hif_ext_group);

	work_done = hif_ext_group->handler(hif_ext_group->context,
					   normalized_budget, cpu);

	/* keep the true amount of work done for the stats, before it is
	 * adjusted below for NAPI bookkeeping
	 */
	actual_dones = work_done;

	/* When a forced completion is requested, under-report work done so
	 * the completion branch below is taken even if the handler consumed
	 * the full budget.
	 */
	if (hif_is_force_napi_complete_required(hif_ext_group)) {
		force_complete = true;
		if (work_done >= normalized_budget)
			work_done = normalized_budget - 1;
	}

	/* Complete NAPI when: forced, or budget not exhausted without a
	 * forced break, or (single-MSI) the irq-disabled time limit hit.
	 */
	if (qdf_unlikely(force_complete) ||
	    (!hif_ext_group->force_break && work_done < normalized_budget) ||
	    ((pld_is_one_msi(scn->qdf_dev->dev) &&
	    hif_irq_disabled_time_limit_reached(hif_ext_group)))) {
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_COMPLETE);
		/* order matters: complete NAPI and drop the active count
		 * before re-enabling the group's interrupts
		 */
		napi_complete(napi);
		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
		hif_ext_group->irq_enable(hif_ext_group);
		hif_ext_group->stats[cpu].napi_completes++;
	} else {
		/* if the ext_group supports time based yield, claim full work
		 * done anyways */
		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_BH_FORCE_BREAK);
		work_done = normalized_budget;
	}

	hif_ext_group->stats[cpu].napi_polls++;
	hif_ext_group->stats[cpu].napi_workdone += actual_dones;

	/* map internal budget to NAPI budget */
	if (work_done)
		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);

	hif_exec_fill_poll_time_histogram(hif_ext_group);

	return work_done;
}
726*5113495bSYour Name 
727*5113495bSYour Name /**
728*5113495bSYour Name  * hif_exec_napi_schedule() - schedule the napi exec instance
729*5113495bSYour Name  * @ctx: a hif_exec_context known to be of napi type
730*5113495bSYour Name  */
hif_exec_napi_schedule(struct hif_exec_context * ctx)731*5113495bSYour Name static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
732*5113495bSYour Name {
733*5113495bSYour Name 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
734*5113495bSYour Name 	ctx->stats[smp_processor_id()].napi_schedules++;
735*5113495bSYour Name 
736*5113495bSYour Name 	napi_schedule(&n_ctx->napi);
737*5113495bSYour Name }
738*5113495bSYour Name 
739*5113495bSYour Name /**
740*5113495bSYour Name  * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
741*5113495bSYour Name  * @ctx: a hif_exec_context known to be of napi type
742*5113495bSYour Name  */
hif_exec_napi_kill(struct hif_exec_context * ctx)743*5113495bSYour Name static void hif_exec_napi_kill(struct hif_exec_context *ctx)
744*5113495bSYour Name {
745*5113495bSYour Name 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
746*5113495bSYour Name 	int irq_ind;
747*5113495bSYour Name 
748*5113495bSYour Name 	if (ctx->inited) {
749*5113495bSYour Name 		qdf_napi_disable(&n_ctx->napi);
750*5113495bSYour Name 		ctx->inited = 0;
751*5113495bSYour Name 	}
752*5113495bSYour Name 
753*5113495bSYour Name 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
754*5113495bSYour Name 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
755*5113495bSYour Name 
756*5113495bSYour Name 	hif_core_ctl_set_boost(false);
757*5113495bSYour Name 	qdf_netif_napi_del(&(n_ctx->napi));
758*5113495bSYour Name }
759*5113495bSYour Name 
760*5113495bSYour Name struct hif_execution_ops napi_sched_ops = {
761*5113495bSYour Name 	.schedule = &hif_exec_napi_schedule,
762*5113495bSYour Name 	.kill = &hif_exec_napi_kill,
763*5113495bSYour Name };
764*5113495bSYour Name 
765*5113495bSYour Name /**
766*5113495bSYour Name  * hif_exec_napi_create() - allocate and initialize a napi exec context
767*5113495bSYour Name  * @scale: a binary shift factor to map NAPI budget from\to internal
768*5113495bSYour Name  *         budget
769*5113495bSYour Name  */
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	struct hif_napi_exec_context *napi_ctx;
	struct hif_exec_context *exec_ctx;

	napi_ctx = qdf_mem_malloc(sizeof(*napi_ctx));
	if (!napi_ctx)
		return NULL;

	exec_ctx = &napi_ctx->exec_ctx;
	exec_ctx->sched_ops = &napi_sched_ops;
	exec_ctx->inited = true;
	exec_ctx->scale_bin_shift = scale;

	/* NAPI needs a netdev to hang off; use a dummy interface */
	qdf_net_if_create_dummy_if((struct qdf_net_if *)&napi_ctx->netdev);
	qdf_netif_napi_add(&napi_ctx->netdev, &napi_ctx->napi, hif_exec_poll,
			   QCA_NAPI_BUDGET);
	qdf_napi_enable(&napi_ctx->napi);

	return exec_ctx;
}
788*5113495bSYour Name #else
static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
{
	/* NAPI support not compiled in: fall back to a tasklet context.
	 * NOTE(review): @scale is unused in the tasklet path.
	 */
	hif_warn("FEATURE_NAPI not defined, making tasklet");
	return hif_exec_tasklet_create();
}
794*5113495bSYour Name #endif
795*5113495bSYour Name 
796*5113495bSYour Name 
797*5113495bSYour Name /**
798*5113495bSYour Name  * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
799*5113495bSYour Name  * @ctx: a hif_exec_context known to be of tasklet type
800*5113495bSYour Name  */
hif_exec_tasklet_kill(struct hif_exec_context * ctx)801*5113495bSYour Name static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
802*5113495bSYour Name {
803*5113495bSYour Name 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
804*5113495bSYour Name 	int irq_ind;
805*5113495bSYour Name 
806*5113495bSYour Name 	if (ctx->inited) {
807*5113495bSYour Name 		tasklet_disable(&t_ctx->tasklet);
808*5113495bSYour Name 		tasklet_kill(&t_ctx->tasklet);
809*5113495bSYour Name 	}
810*5113495bSYour Name 	ctx->inited = false;
811*5113495bSYour Name 
812*5113495bSYour Name 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
813*5113495bSYour Name 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
814*5113495bSYour Name }
815*5113495bSYour Name 
816*5113495bSYour Name struct hif_execution_ops tasklet_sched_ops = {
817*5113495bSYour Name 	.schedule = &hif_exec_tasklet_schedule,
818*5113495bSYour Name 	.kill = &hif_exec_tasklet_kill,
819*5113495bSYour Name };
820*5113495bSYour Name 
821*5113495bSYour Name /**
822*5113495bSYour Name  * hif_exec_tasklet_create() -  allocate and initialize a tasklet exec context
823*5113495bSYour Name  */
hif_exec_tasklet_create(void)824*5113495bSYour Name static struct hif_exec_context *hif_exec_tasklet_create(void)
825*5113495bSYour Name {
826*5113495bSYour Name 	struct hif_tasklet_exec_context *ctx;
827*5113495bSYour Name 
828*5113495bSYour Name 	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
829*5113495bSYour Name 	if (!ctx)
830*5113495bSYour Name 		return NULL;
831*5113495bSYour Name 
832*5113495bSYour Name 	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
833*5113495bSYour Name 	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
834*5113495bSYour Name 		     (unsigned long)ctx);
835*5113495bSYour Name 
836*5113495bSYour Name 	ctx->exec_ctx.inited = true;
837*5113495bSYour Name 
838*5113495bSYour Name 	return &ctx->exec_ctx;
839*5113495bSYour Name }
840*5113495bSYour Name 
841*5113495bSYour Name /**
842*5113495bSYour Name  * hif_exec_get_ctx() - retrieve an exec context based on an id
843*5113495bSYour Name  * @softc: the hif context owning the exec context
844*5113495bSYour Name  * @id: the id of the exec context
845*5113495bSYour Name  *
846*5113495bSYour Name  * mostly added to make it easier to rename or move the context array
847*5113495bSYour Name  */
struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
					  uint8_t id)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);

	/* out-of-range ids yield no context */
	if (id >= hif_state->hif_num_extgroup)
		return NULL;

	return hif_state->hif_ext_group[id];
}
858*5113495bSYour Name 
hif_get_int_ctx_irq_num(struct hif_opaque_softc * softc,uint8_t id)859*5113495bSYour Name int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
860*5113495bSYour Name 				uint8_t id)
861*5113495bSYour Name {
862*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
863*5113495bSYour Name 
864*5113495bSYour Name 	if (id < hif_state->hif_num_extgroup)
865*5113495bSYour Name 		return hif_state->hif_ext_group[id]->os_irq[0];
866*5113495bSYour Name 	return -EINVAL;
867*5113495bSYour Name }
868*5113495bSYour Name 
869*5113495bSYour Name qdf_export_symbol(hif_get_int_ctx_irq_num);
870*5113495bSYour Name 
hif_configure_ext_group_interrupts(struct hif_opaque_softc * hif_ctx)871*5113495bSYour Name QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
872*5113495bSYour Name {
873*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
874*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
875*5113495bSYour Name 	struct hif_exec_context *hif_ext_group;
876*5113495bSYour Name 	int i, status;
877*5113495bSYour Name 
878*5113495bSYour Name 	if (scn->ext_grp_irq_configured) {
879*5113495bSYour Name 		hif_err("Called after ext grp irq configured");
880*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
881*5113495bSYour Name 	}
882*5113495bSYour Name 
883*5113495bSYour Name 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
884*5113495bSYour Name 		hif_ext_group = hif_state->hif_ext_group[i];
885*5113495bSYour Name 		status = 0;
886*5113495bSYour Name 		qdf_spinlock_create(&hif_ext_group->irq_lock);
887*5113495bSYour Name 		if (hif_ext_group->configured &&
888*5113495bSYour Name 		    hif_ext_group->irq_requested == false) {
889*5113495bSYour Name 			hif_ext_group->irq_enabled = true;
890*5113495bSYour Name 			status = hif_grp_irq_configure(scn, hif_ext_group);
891*5113495bSYour Name 		}
892*5113495bSYour Name 		if (status != 0) {
893*5113495bSYour Name 			hif_err("Failed for group %d", i);
894*5113495bSYour Name 			hif_ext_group->irq_enabled = false;
895*5113495bSYour Name 		}
896*5113495bSYour Name 	}
897*5113495bSYour Name 
898*5113495bSYour Name 	scn->ext_grp_irq_configured = true;
899*5113495bSYour Name 
900*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
901*5113495bSYour Name }
902*5113495bSYour Name 
903*5113495bSYour Name qdf_export_symbol(hif_configure_ext_group_interrupts);
904*5113495bSYour Name 
hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc * hif_ctx)905*5113495bSYour Name void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
906*5113495bSYour Name {
907*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
908*5113495bSYour Name 
909*5113495bSYour Name 	if (!scn || !scn->ext_grp_irq_configured) {
910*5113495bSYour Name 		hif_err("scn(%pk) is NULL or grp irq not configured", scn);
911*5113495bSYour Name 		return;
912*5113495bSYour Name 	}
913*5113495bSYour Name 
914*5113495bSYour Name 	hif_grp_irq_deconfigure(scn);
915*5113495bSYour Name 	scn->ext_grp_irq_configured = false;
916*5113495bSYour Name }
917*5113495bSYour Name 
918*5113495bSYour Name qdf_export_symbol(hif_deconfigure_ext_group_interrupts);
919*5113495bSYour Name 
920*5113495bSYour Name #ifdef WLAN_SUSPEND_RESUME_TEST
921*5113495bSYour Name /**
922*5113495bSYour Name  * hif_check_and_trigger_ut_resume() - check if unit-test command was used to
923*5113495bSYour Name  *				       to trigger fake-suspend command, if yes
924*5113495bSYour Name  *				       then issue resume procedure.
925*5113495bSYour Name  * @scn: opaque HIF software context
926*5113495bSYour Name  *
927*5113495bSYour Name  * This API checks if unit-test command was used to trigger fake-suspend command
928*5113495bSYour Name  * and if answer is yes then it would trigger resume procedure.
929*5113495bSYour Name  *
930*5113495bSYour Name  * Make this API inline to save API-switch overhead and do branch-prediction to
931*5113495bSYour Name  * optimize performance impact.
932*5113495bSYour Name  *
933*5113495bSYour Name  * Return: void
934*5113495bSYour Name  */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
	/* branch-predicted cold: only taken when a unit-test fake suspend
	 * put the FW in WOW mode and it must be explicitly resumed
	 */
	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
		hif_ut_fw_resume(scn);
}
940*5113495bSYour Name #else
/* Stub: no fake-suspend handling without WLAN_SUSPEND_RESUME_TEST */
static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
{
}
944*5113495bSYour Name #endif
945*5113495bSYour Name 
946*5113495bSYour Name /**
947*5113495bSYour Name  * hif_check_and_trigger_sys_resume() - Check for bus suspend and
948*5113495bSYour Name  *  trigger system resume
949*5113495bSYour Name  * @scn: hif context
950*5113495bSYour Name  * @irq: irq number
951*5113495bSYour Name  *
952*5113495bSYour Name  * Return: None
953*5113495bSYour Name  */
954*5113495bSYour Name static inline void
hif_check_and_trigger_sys_resume(struct hif_softc * scn,int irq)955*5113495bSYour Name hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
956*5113495bSYour Name {
957*5113495bSYour Name 	if (scn->bus_suspended && scn->linkstate_vote) {
958*5113495bSYour Name 		hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
959*5113495bSYour Name 		qdf_pm_system_wakeup();
960*5113495bSYour Name 	}
961*5113495bSYour Name }
962*5113495bSYour Name 
963*5113495bSYour Name /**
964*5113495bSYour Name  * hif_ext_group_interrupt_handler() - handler for related interrupts
965*5113495bSYour Name  * @irq: irq number of the interrupt
966*5113495bSYour Name  * @context: the associated hif_exec_group context
967*5113495bSYour Name  *
968*5113495bSYour Name  * This callback function takes care of disabling the associated interrupts
969*5113495bSYour Name  * and scheduling the expected bottom half for the exec_context.
970*5113495bSYour Name  * This callback function also helps keep track of the count running contexts.
971*5113495bSYour Name  */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
{
	struct hif_exec_context *hif_ext_group = context;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	/* ignore spurious interrupts arriving before/after irq registration */
	if (hif_ext_group->irq_requested) {
		hif_latency_profile_start(hif_ext_group);

		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);

		/* mask group irqs until the bottom half re-enables them */
		hif_ext_group->irq_disable(hif_ext_group);

		/* single-MSI: start the irq-disabled watchdog clock checked
		 * by hif_irq_disabled_time_limit_reached()
		 */
		if (pld_is_one_msi(scn->qdf_dev->dev))
			hif_ext_group->irq_disabled_start_time =
							qdf_time_sched_clock();
		/*
		 * if private ioctl has issued fake suspend command to put
		 * FW in D0-WOW state then here is our chance to bring FW out
		 * of WOW mode.
		 *
		 * The reason why you need to explicitly wake-up the FW is here:
		 * APSS should have been in fully awake through-out when
		 * fake APSS suspend command was issued (to put FW in WOW mode)
		 * hence organic way of waking-up the FW
		 * (as part-of APSS-host wake-up) won't happen because
		 * in reality APSS didn't really suspend.
		 */
		hif_check_and_trigger_ut_resume(scn);

		hif_check_and_trigger_sys_resume(scn, irq);

		/* count the context as running before scheduling its BH;
		 * the BH (e.g. hif_exec_poll) decrements on completion
		 */
		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);

		hif_ext_group->sched_ops->schedule(hif_ext_group);
	}

	return IRQ_HANDLED;
}
1011*5113495bSYour Name 
1012*5113495bSYour Name /**
1013*5113495bSYour Name  * hif_exec_kill() - grp tasklet kill
1014*5113495bSYour Name  * @hif_ctx: hif_softc
1015*5113495bSYour Name  *
1016*5113495bSYour Name  * return: void
1017*5113495bSYour Name  */
hif_exec_kill(struct hif_opaque_softc * hif_ctx)1018*5113495bSYour Name void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
1019*5113495bSYour Name {
1020*5113495bSYour Name 	int i;
1021*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1022*5113495bSYour Name 
1023*5113495bSYour Name 	for (i = 0; i < hif_state->hif_num_extgroup; i++)
1024*5113495bSYour Name 		hif_state->hif_ext_group[i]->sched_ops->kill(
1025*5113495bSYour Name 			hif_state->hif_ext_group[i]);
1026*5113495bSYour Name 
1027*5113495bSYour Name 	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
1028*5113495bSYour Name }
1029*5113495bSYour Name 
1030*5113495bSYour Name #ifdef FEATURE_IRQ_AFFINITY
1031*5113495bSYour Name static inline void
hif_init_force_napi_complete(struct hif_exec_context * hif_ext_group)1032*5113495bSYour Name hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
1033*5113495bSYour Name {
1034*5113495bSYour Name 	qdf_atomic_init(&hif_ext_group->force_napi_complete);
1035*5113495bSYour Name }
1036*5113495bSYour Name #else
1037*5113495bSYour Name static inline void
hif_init_force_napi_complete(struct hif_exec_context * hif_ext_group)1038*5113495bSYour Name hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
1039*5113495bSYour Name {
1040*5113495bSYour Name }
1041*5113495bSYour Name #endif
1042*5113495bSYour Name 
1043*5113495bSYour Name /**
1044*5113495bSYour Name  * hif_register_ext_group() - API to register external group
1045*5113495bSYour Name  * interrupt handler.
1046*5113495bSYour Name  * @hif_ctx : HIF Context
1047*5113495bSYour Name  * @numirq: number of irq's in the group
1048*5113495bSYour Name  * @irq: array of irq values
1049*5113495bSYour Name  * @handler: callback interrupt handler function
1050*5113495bSYour Name  * @cb_ctx: context to passed in callback
1051*5113495bSYour Name  * @context_name: context name
1052*5113495bSYour Name  * @type: napi vs tasklet
1053*5113495bSYour Name  * @scale:
1054*5113495bSYour Name  *
1055*5113495bSYour Name  * Return: QDF_STATUS
1056*5113495bSYour Name  */
hif_register_ext_group(struct hif_opaque_softc * hif_ctx,uint32_t numirq,uint32_t irq[],ext_intr_handler handler,void * cb_ctx,const char * context_name,enum hif_exec_type type,uint32_t scale)1057*5113495bSYour Name QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1058*5113495bSYour Name 				  uint32_t numirq, uint32_t irq[],
1059*5113495bSYour Name 				  ext_intr_handler handler,
1060*5113495bSYour Name 				  void *cb_ctx, const char *context_name,
1061*5113495bSYour Name 				  enum hif_exec_type type, uint32_t scale)
1062*5113495bSYour Name {
1063*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1064*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1065*5113495bSYour Name 	struct hif_exec_context *hif_ext_group;
1066*5113495bSYour Name 
1067*5113495bSYour Name 	if (scn->ext_grp_irq_configured) {
1068*5113495bSYour Name 		hif_err("Called after ext grp irq configured");
1069*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1070*5113495bSYour Name 	}
1071*5113495bSYour Name 
1072*5113495bSYour Name 	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
1073*5113495bSYour Name 		hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
1074*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1075*5113495bSYour Name 	}
1076*5113495bSYour Name 
1077*5113495bSYour Name 	if (numirq >= HIF_MAX_GRP_IRQ) {
1078*5113495bSYour Name 		hif_err("Invalid numirq: %d", numirq);
1079*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1080*5113495bSYour Name 	}
1081*5113495bSYour Name 
1082*5113495bSYour Name 	hif_ext_group = hif_exec_create(type, scale);
1083*5113495bSYour Name 	if (!hif_ext_group)
1084*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1085*5113495bSYour Name 
1086*5113495bSYour Name 	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
1087*5113495bSYour Name 		hif_ext_group;
1088*5113495bSYour Name 
1089*5113495bSYour Name 	hif_ext_group->numirq = numirq;
1090*5113495bSYour Name 	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
1091*5113495bSYour Name 	hif_ext_group->context = cb_ctx;
1092*5113495bSYour Name 	hif_ext_group->handler = handler;
1093*5113495bSYour Name 	hif_ext_group->configured = true;
1094*5113495bSYour Name 	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
1095*5113495bSYour Name 	hif_ext_group->hif = hif_ctx;
1096*5113495bSYour Name 	hif_ext_group->context_name = context_name;
1097*5113495bSYour Name 	hif_ext_group->type = type;
1098*5113495bSYour Name 	hif_init_force_napi_complete(hif_ext_group);
1099*5113495bSYour Name 
1100*5113495bSYour Name 	hif_state->hif_num_extgroup++;
1101*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1102*5113495bSYour Name }
1103*5113495bSYour Name qdf_export_symbol(hif_register_ext_group);
1104*5113495bSYour Name 
1105*5113495bSYour Name /**
1106*5113495bSYour Name  * hif_exec_create() - create an execution context
1107*5113495bSYour Name  * @type: the type of execution context to create
1108*5113495bSYour Name  * @scale:
1109*5113495bSYour Name  */
struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
						uint32_t scale)
{
	hif_debug("%s: create exec_type %d budget %d",
		  __func__, type, QCA_NAPI_BUDGET * scale);

	/* dispatch on the requested bottom-half flavor */
	if (type == HIF_EXEC_NAPI_TYPE)
		return hif_exec_napi_create(scale);

	if (type == HIF_EXEC_TASKLET_TYPE)
		return hif_exec_tasklet_create();

	return NULL;
}
1126*5113495bSYour Name 
1127*5113495bSYour Name /**
1128*5113495bSYour Name  * hif_exec_destroy() - free the hif_exec context
1129*5113495bSYour Name  * @ctx: context to free
1130*5113495bSYour Name  *
1131*5113495bSYour Name  * please kill the context before freeing it to avoid a use after free.
1132*5113495bSYour Name  */
hif_exec_destroy(struct hif_exec_context * ctx)1133*5113495bSYour Name void hif_exec_destroy(struct hif_exec_context *ctx)
1134*5113495bSYour Name {
1135*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);
1136*5113495bSYour Name 
1137*5113495bSYour Name 	if (scn->ext_grp_irq_configured)
1138*5113495bSYour Name 		qdf_spinlock_destroy(&ctx->irq_lock);
1139*5113495bSYour Name 	qdf_mem_free(ctx);
1140*5113495bSYour Name }
1141*5113495bSYour Name 
1142*5113495bSYour Name /**
1143*5113495bSYour Name  * hif_deregister_exec_group() - API to free the exec contexts
1144*5113495bSYour Name  * @hif_ctx: HIF context
1145*5113495bSYour Name  * @context_name: name of the module whose contexts need to be deregistered
1146*5113495bSYour Name  *
1147*5113495bSYour Name  * This function deregisters the contexts of the requestor identified
1148*5113495bSYour Name  * based on the context_name & frees the memory.
1149*5113495bSYour Name  *
1150*5113495bSYour Name  * Return: void
1151*5113495bSYour Name  */
hif_deregister_exec_group(struct hif_opaque_softc * hif_ctx,const char * context_name)1152*5113495bSYour Name void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1153*5113495bSYour Name 				const char *context_name)
1154*5113495bSYour Name {
1155*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1156*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1157*5113495bSYour Name 	struct hif_exec_context *hif_ext_group;
1158*5113495bSYour Name 	int i;
1159*5113495bSYour Name 
1160*5113495bSYour Name 	for (i = 0; i < HIF_MAX_GROUP; i++) {
1161*5113495bSYour Name 		hif_ext_group = hif_state->hif_ext_group[i];
1162*5113495bSYour Name 
1163*5113495bSYour Name 		if (!hif_ext_group)
1164*5113495bSYour Name 			continue;
1165*5113495bSYour Name 
1166*5113495bSYour Name 		hif_debug("%s: Deregistering grp id %d name %s",
1167*5113495bSYour Name 			  __func__,
1168*5113495bSYour Name 			  hif_ext_group->grp_id,
1169*5113495bSYour Name 			  hif_ext_group->context_name);
1170*5113495bSYour Name 
1171*5113495bSYour Name 		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
1172*5113495bSYour Name 			hif_ext_group->sched_ops->kill(hif_ext_group);
1173*5113495bSYour Name 			hif_state->hif_ext_group[i] = NULL;
1174*5113495bSYour Name 			hif_exec_destroy(hif_ext_group);
1175*5113495bSYour Name 			hif_state->hif_num_extgroup--;
1176*5113495bSYour Name 		}
1177*5113495bSYour Name 
1178*5113495bSYour Name 	}
1179*5113495bSYour Name }
1180*5113495bSYour Name qdf_export_symbol(hif_deregister_exec_group);
1181*5113495bSYour Name 
1182*5113495bSYour Name #ifdef DP_UMAC_HW_RESET_SUPPORT
1183*5113495bSYour Name /**
1184*5113495bSYour Name  * hif_umac_reset_handler_tasklet() - Tasklet for UMAC HW reset interrupt
1185*5113495bSYour Name  * @data: UMAC HW reset HIF context
1186*5113495bSYour Name  *
1187*5113495bSYour Name  * return: void
1188*5113495bSYour Name  */
hif_umac_reset_handler_tasklet(unsigned long data)1189*5113495bSYour Name static void hif_umac_reset_handler_tasklet(unsigned long data)
1190*5113495bSYour Name {
1191*5113495bSYour Name 	struct hif_umac_reset_ctx *umac_reset_ctx =
1192*5113495bSYour Name 		(struct hif_umac_reset_ctx *)data;
1193*5113495bSYour Name 
1194*5113495bSYour Name 	/* call the callback handler */
1195*5113495bSYour Name 	umac_reset_ctx->cb_handler(umac_reset_ctx->cb_ctx);
1196*5113495bSYour Name }
1197*5113495bSYour Name 
1198*5113495bSYour Name /**
1199*5113495bSYour Name  * hif_umac_reset_irq_handler() - Interrupt service routine of UMAC HW reset
1200*5113495bSYour Name  * @irq: irq coming from kernel
1201*5113495bSYour Name  * @ctx: UMAC HW reset HIF context
1202*5113495bSYour Name  *
1203*5113495bSYour Name  * return: IRQ_HANDLED if success, else IRQ_NONE
1204*5113495bSYour Name  */
hif_umac_reset_irq_handler(int irq,void * ctx)1205*5113495bSYour Name static irqreturn_t hif_umac_reset_irq_handler(int irq, void *ctx)
1206*5113495bSYour Name {
1207*5113495bSYour Name 	struct hif_umac_reset_ctx *umac_reset_ctx = ctx;
1208*5113495bSYour Name 
1209*5113495bSYour Name 	/* Schedule the tasklet if it is umac reset interrupt and exit */
1210*5113495bSYour Name 	if (umac_reset_ctx->irq_handler(umac_reset_ctx->cb_ctx))
1211*5113495bSYour Name 		tasklet_hi_schedule(&umac_reset_ctx->intr_tq);
1212*5113495bSYour Name 
1213*5113495bSYour Name 	return IRQ_HANDLED;
1214*5113495bSYour Name }
1215*5113495bSYour Name 
hif_get_umac_reset_irq(struct hif_opaque_softc * hif_scn,int * umac_reset_irq)1216*5113495bSYour Name QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
1217*5113495bSYour Name 				  int *umac_reset_irq)
1218*5113495bSYour Name {
1219*5113495bSYour Name 	int ret;
1220*5113495bSYour Name 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1221*5113495bSYour Name 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
1222*5113495bSYour Name 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
1223*5113495bSYour Name 
1224*5113495bSYour Name 	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
1225*5113495bSYour Name 			   "umac_reset", 0, umac_reset_irq);
1226*5113495bSYour Name 
1227*5113495bSYour Name 	if (ret) {
1228*5113495bSYour Name 		hif_err("umac reset get irq failed ret %d", ret);
1229*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1230*5113495bSYour Name 	}
1231*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1232*5113495bSYour Name }
1233*5113495bSYour Name 
1234*5113495bSYour Name qdf_export_symbol(hif_get_umac_reset_irq);
1235*5113495bSYour Name 
hif_register_umac_reset_handler(struct hif_opaque_softc * hif_scn,bool (* irq_handler)(void * cb_ctx),int (* tl_handler)(void * cb_ctx),void * cb_ctx,int irq)1236*5113495bSYour Name QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
1237*5113495bSYour Name 					   bool (*irq_handler)(void *cb_ctx),
1238*5113495bSYour Name 					   int (*tl_handler)(void *cb_ctx),
1239*5113495bSYour Name 					   void *cb_ctx, int irq)
1240*5113495bSYour Name {
1241*5113495bSYour Name 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1242*5113495bSYour Name 	struct hif_umac_reset_ctx *umac_reset_ctx;
1243*5113495bSYour Name 	int ret;
1244*5113495bSYour Name 
1245*5113495bSYour Name 	if (!hif_sc) {
1246*5113495bSYour Name 		hif_err("scn is null");
1247*5113495bSYour Name 		return QDF_STATUS_E_NULL_VALUE;
1248*5113495bSYour Name 	}
1249*5113495bSYour Name 
1250*5113495bSYour Name 	umac_reset_ctx = &hif_sc->umac_reset_ctx;
1251*5113495bSYour Name 
1252*5113495bSYour Name 	umac_reset_ctx->irq_handler = irq_handler;
1253*5113495bSYour Name 	umac_reset_ctx->cb_handler = tl_handler;
1254*5113495bSYour Name 	umac_reset_ctx->cb_ctx = cb_ctx;
1255*5113495bSYour Name 	umac_reset_ctx->os_irq = irq;
1256*5113495bSYour Name 
1257*5113495bSYour Name 	/* Init the tasklet */
1258*5113495bSYour Name 	tasklet_init(&umac_reset_ctx->intr_tq,
1259*5113495bSYour Name 		     hif_umac_reset_handler_tasklet,
1260*5113495bSYour Name 		     (unsigned long)umac_reset_ctx);
1261*5113495bSYour Name 
1262*5113495bSYour Name 	/* Register the interrupt handler */
1263*5113495bSYour Name 	ret  = pfrm_request_irq(hif_sc->qdf_dev->dev, irq,
1264*5113495bSYour Name 				hif_umac_reset_irq_handler,
1265*5113495bSYour Name 				IRQF_NO_SUSPEND,
1266*5113495bSYour Name 				"umac_hw_reset_irq",
1267*5113495bSYour Name 				umac_reset_ctx);
1268*5113495bSYour Name 	if (ret) {
1269*5113495bSYour Name 		hif_err("request_irq failed: %d", ret);
1270*5113495bSYour Name 		return qdf_status_from_os_return(ret);
1271*5113495bSYour Name 	}
1272*5113495bSYour Name 
1273*5113495bSYour Name 	umac_reset_ctx->irq_configured = true;
1274*5113495bSYour Name 
1275*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1276*5113495bSYour Name }
1277*5113495bSYour Name 
1278*5113495bSYour Name qdf_export_symbol(hif_register_umac_reset_handler);
1279*5113495bSYour Name 
hif_unregister_umac_reset_handler(struct hif_opaque_softc * hif_scn)1280*5113495bSYour Name QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
1281*5113495bSYour Name {
1282*5113495bSYour Name 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1283*5113495bSYour Name 	struct hif_umac_reset_ctx *umac_reset_ctx;
1284*5113495bSYour Name 	int ret;
1285*5113495bSYour Name 
1286*5113495bSYour Name 	if (!hif_sc) {
1287*5113495bSYour Name 		hif_err("scn is null");
1288*5113495bSYour Name 		return QDF_STATUS_E_NULL_VALUE;
1289*5113495bSYour Name 	}
1290*5113495bSYour Name 
1291*5113495bSYour Name 	umac_reset_ctx = &hif_sc->umac_reset_ctx;
1292*5113495bSYour Name 	if (!umac_reset_ctx->irq_configured) {
1293*5113495bSYour Name 		hif_err("unregister called without a prior IRQ configuration");
1294*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1295*5113495bSYour Name 	}
1296*5113495bSYour Name 
1297*5113495bSYour Name 	ret  = pfrm_free_irq(hif_sc->qdf_dev->dev,
1298*5113495bSYour Name 			     umac_reset_ctx->os_irq,
1299*5113495bSYour Name 			     umac_reset_ctx);
1300*5113495bSYour Name 	if (ret) {
1301*5113495bSYour Name 		hif_err("free_irq failed: %d", ret);
1302*5113495bSYour Name 		return qdf_status_from_os_return(ret);
1303*5113495bSYour Name 	}
1304*5113495bSYour Name 	umac_reset_ctx->irq_configured = false;
1305*5113495bSYour Name 
1306*5113495bSYour Name 	tasklet_disable(&umac_reset_ctx->intr_tq);
1307*5113495bSYour Name 	tasklet_kill(&umac_reset_ctx->intr_tq);
1308*5113495bSYour Name 
1309*5113495bSYour Name 	umac_reset_ctx->cb_handler = NULL;
1310*5113495bSYour Name 	umac_reset_ctx->cb_ctx = NULL;
1311*5113495bSYour Name 
1312*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1313*5113495bSYour Name }
1314*5113495bSYour Name 
1315*5113495bSYour Name qdf_export_symbol(hif_unregister_umac_reset_handler);
1316*5113495bSYour Name #endif
1317