xref: /wlan-driver/qca-wifi-host-cmn/hif/src/hif_exec.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <hif_exec.h>
21 #include <ce_main.h>
22 #include "qdf_module.h"
23 #include "qdf_net_if.h"
24 #include <pld_common.h>
25 #ifdef DP_UMAC_HW_RESET_SUPPORT
26 #include "if_pci.h"
27 #endif
28 #include "qdf_ssr_driver_dump.h"
29 
30 /* mapping NAPI budget 0 to internal budget 0
31  * NAPI budget 1 to internal budget [1, scaler - 1]
32  * NAPI budget 2 to internal budget [scaler, 2 * scaler - 1], etc.
33  */
34 #define NAPI_BUDGET_TO_INTERNAL_BUDGET(n, s) \
35 	(((n) << (s)) - 1)
36 #define INTERNAL_BUDGET_TO_NAPI_BUDGET(n, s) \
37 	(((n) + 1) >> (s))
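
/*
 * Worked example (illustrative, assuming a scale factor shift of 1):
 * a NAPI budget of 64 maps to an internal budget of (64 << 1) - 1 = 127,
 * and an internal work_done of 127 maps back to (127 + 1) >> 1 = 64.
 */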
38 
39 static struct hif_exec_context *hif_exec_tasklet_create(void);
40 
41 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
42 struct hif_event_history hif_event_desc_history[HIF_NUM_INT_CONTEXTS];
43 uint32_t hif_event_hist_max = HIF_EVENT_HIST_MAX;
44 
45 void hif_desc_history_log_register(void)
46 {
47 	qdf_ssr_driver_dump_register_region("hif_event_history",
48 					    hif_event_desc_history,
49 					    sizeof(hif_event_desc_history));
50 	qdf_ssr_driver_dump_register_region("hif_event_hist_max",
51 					    &hif_event_hist_max,
52 					    sizeof(hif_event_hist_max));
53 }
54 
55 void hif_desc_history_log_unregister(void)
56 {
57 	qdf_ssr_driver_dump_unregister_region("hif_event_hist_max");
58 	qdf_ssr_driver_dump_unregister_region("hif_event_history");
59 }
60 
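/**
 * hif_get_next_record_index() - get the index of the next history slot
 * @table_index: atomic write index, post-incremented on every call
 * @array_size: size of the history array; the mask below assumes it is a
 *              power of two, so the returned index simply wraps around
 *
 * Return: index into the history array at which to record the next event
 */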
61 static inline
62 int hif_get_next_record_index(qdf_atomic_t *table_index,
63 			      int array_size)
64 {
65 	int record_index = qdf_atomic_inc_return(table_index);
66 
67 	return record_index & (array_size - 1);
68 }
69 
70 /**
71  * hif_hist_is_prev_record() - Check if index is the immediate
72  *  previous record wrt curr_index
73  * @curr_index: curr index in the event history
74  * @index: index to be checked
75  * @hist_size: history size
76  *
77  * Return: true if index is immediately behind curr_index else false
78  */
79 static inline
80 bool hif_hist_is_prev_record(int32_t curr_index, int32_t index,
81 			     uint32_t hist_size)
82 {
83 	return (((index + 1) & (hist_size - 1)) == curr_index) ?
84 			true : false;
85 }
86 
87 /**
88  * hif_hist_skip_event_record() - Check if current event needs to be
89  *  recorded or not
90  * @hist_ev: HIF event history
91  * @event: DP event entry
92  *
93  * Return: true if current event needs to be skipped else false
94  */
95 static bool
96 hif_hist_skip_event_record(struct hif_event_history *hist_ev,
97 			   struct hif_event_record *event)
98 {
99 	struct hif_event_record *rec;
100 	struct hif_event_record *last_irq_rec;
101 	int32_t index;
102 
103 	index = qdf_atomic_read(&hist_ev->index);
104 	if (index < 0)
105 		return false;
106 
107 	index &= (HIF_EVENT_HIST_MAX - 1);
108 	rec = &hist_ev->event[index];
109 
110 	switch (event->type) {
111 	case HIF_EVENT_IRQ_TRIGGER:
112 		/*
113 		 * The prev record check is to prevent skipping the IRQ event
114 		 * record in case where BH got re-scheduled due to force_break
115 		 * but there are no entries to be reaped in the rings.
116 		 */
117 		if (rec->type == HIF_EVENT_BH_SCHED &&
118 		    hif_hist_is_prev_record(index,
119 					    hist_ev->misc.last_irq_index,
120 					    HIF_EVENT_HIST_MAX)) {
121 			last_irq_rec =
122 				&hist_ev->event[hist_ev->misc.last_irq_index];
123 			last_irq_rec->timestamp = hif_get_log_timestamp();
124 			last_irq_rec->cpu_id = qdf_get_cpu();
125 			last_irq_rec->hp++;
126 			last_irq_rec->tp = last_irq_rec->timestamp -
127 						hist_ev->misc.last_irq_ts;
128 			return true;
129 		}
130 		break;
131 	case HIF_EVENT_BH_SCHED:
132 		if (rec->type == HIF_EVENT_BH_SCHED) {
133 			rec->timestamp = hif_get_log_timestamp();
134 			rec->cpu_id = qdf_get_cpu();
135 			return true;
136 		}
137 		break;
138 	case HIF_EVENT_SRNG_ACCESS_START:
139 		if (event->hp == event->tp)
140 			return true;
141 		break;
142 	case HIF_EVENT_SRNG_ACCESS_END:
143 		if (rec->type != HIF_EVENT_SRNG_ACCESS_START)
144 			return true;
145 		break;
146 	case HIF_EVENT_BH_COMPLETE:
147 	case HIF_EVENT_BH_FORCE_BREAK:
148 		if (rec->type != HIF_EVENT_SRNG_ACCESS_END)
149 			return true;
150 		break;
151 	default:
152 		break;
153 	}
154 
155 	return false;
156 }
157 
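/**
 * hif_hist_record_event() - add one entry to an interrupt group's
 *  event history
 * @hif_ctx: hif opaque context
 * @event: event to be recorded (type, ring id, head/tail pointers)
 * @intr_grp_id: interrupt group whose history buffer is used
 *
 * The event is dropped if its type is not enabled in scn->event_enable_mask,
 * if the group id is out of range, if the group has no history buffer, or if
 * hif_hist_skip_event_record() decides it can be skipped or coalesced into
 * the previous entry.
 *
 * Return: none
 */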
158 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
159 			   struct hif_event_record *event, uint8_t intr_grp_id)
160 {
161 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
162 	struct hif_event_history *hist_ev;
163 	struct hif_event_record *record;
164 	int record_index;
165 
166 	if (!(scn->event_enable_mask & BIT(event->type)))
167 		return;
168 
169 	if (qdf_unlikely(intr_grp_id >= HIF_NUM_INT_CONTEXTS)) {
170 		hif_err("Invalid interrupt group id %d", intr_grp_id);
171 		return;
172 	}
173 
174 	hist_ev = scn->evt_hist[intr_grp_id];
175 	if (qdf_unlikely(!hist_ev))
176 		return;
177 
178 	if (hif_hist_skip_event_record(hist_ev, event))
179 		return;
180 
181 	record_index = hif_get_next_record_index(
182 			&hist_ev->index, HIF_EVENT_HIST_MAX);
183 
184 	record = &hist_ev->event[record_index];
185 
186 	if (event->type == HIF_EVENT_IRQ_TRIGGER) {
187 		hist_ev->misc.last_irq_index = record_index;
188 		hist_ev->misc.last_irq_ts = hif_get_log_timestamp();
189 	}
190 
191 	record->hal_ring_id = event->hal_ring_id;
192 	record->hp = event->hp;
193 	record->tp = event->tp;
194 	record->cpu_id = qdf_get_cpu();
195 	record->timestamp = hif_get_log_timestamp();
196 	record->type = event->type;
197 }
198 
199 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id)
200 {
201 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
202 
203 	scn->evt_hist[id] = &hif_event_desc_history[id];
204 	qdf_atomic_set(&scn->evt_hist[id]->index, -1);
205 
206 	hif_info("SRNG events history initialized for group: %d", id);
207 }
208 
209 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id)
210 {
211 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
212 
213 	scn->evt_hist[id] = NULL;
214 	hif_info("SRNG events history de-initialized for group: %d", id);
215 }
216 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
217 
218 #ifndef QCA_WIFI_WCN6450
219 /**
220  * hif_print_napi_latency_stats() - print NAPI scheduling latency stats
221  * @hif_state: hif context
222  *
223  * return: void
224  */
225 #ifdef HIF_LATENCY_PROFILE_ENABLE
226 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
227 {
228 	struct hif_exec_context *hif_ext_group;
229 	int i, j;
230 	int64_t cur_tstamp;
231 
232 	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] =  {
233 		"0-2   ms",
234 		"3-10  ms",
235 		"11-20 ms",
236 		"21-50 ms",
237 		"51-100 ms",
238 		"101-250 ms",
239 		"251-500 ms",
240 		"> 500 ms"
241 	};
242 
243 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
244 
245 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
246 		  "Current timestamp: %lld", cur_tstamp);
247 
248 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
249 		if (hif_state->hif_ext_group[i]) {
250 			hif_ext_group = hif_state->hif_ext_group[i];
251 
252 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
253 				  "ext grp %d Last serviced timestamp: %lld",
254 				  i, hif_ext_group->tstamp);
255 
256 			QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
257 				  "Latency Bucket     | Time elapsed");
258 
259 			for (j = 0; j < HIF_SCHED_LATENCY_BUCKETS; j++) {
260 				if (hif_ext_group->sched_latency_stats[j])
261 					QDF_TRACE(QDF_MODULE_ID_HIF,
262 						  QDF_TRACE_LEVEL_INFO_HIGH,
263 						  "%s     |    %lld",
264 						  time_str[j],
265 						  hif_ext_group->
266 						  sched_latency_stats[j]);
267 			}
268 		}
269 	}
270 }
271 #else
272 static void hif_print_napi_latency_stats(struct HIF_CE_state *hif_state)
273 {
274 }
275 #endif
276 
277 /**
278  * hif_clear_napi_stats() - reset NAPI stats
279  * @hif_ctx: hif context
280  *
281  * return: void
282  */
283 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
284 {
285 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
286 	struct hif_exec_context *hif_ext_group;
287 	size_t i;
288 
289 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
290 		hif_ext_group = hif_state->hif_ext_group[i];
291 
292 		if (!hif_ext_group)
293 			return;
294 
295 		qdf_mem_set(hif_ext_group->sched_latency_stats,
296 			    sizeof(hif_ext_group->sched_latency_stats),
297 			    0x0);
298 	}
299 }
300 
301 qdf_export_symbol(hif_clear_napi_stats);
302 
303 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
304 /**
305  * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
306  * @stats: NAPI stats to get poll time buckets
307  * @buf: buffer to fill histogram string
308  * @buf_len: length of the buffer
309  *
310  * Return: void
311  */
312 static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
313 					uint8_t buf_len)
314 {
315 	int i;
316 	int str_index = 0;
317 
318 	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
319 		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
320 					   "%u|", stats->poll_time_buckets[i]);
321 }
322 
323 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
324 {
325 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
326 	struct hif_exec_context *hif_ext_group;
327 	struct qca_napi_stat *napi_stats;
328 	int i, j;
329 
330 	/*
331 	 * Max value of uint32_t (poll_time_bucket) = 4294967295
332 	 * Thus we need 10 chars + 1 separator = 11 chars for each bucket value,
333 	 * plus 1 char for the terminating '\0'.
334 	 */
335 	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};
336 
337 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
338 		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");
339 
340 	for (i = 0;
341 	     (i < hif_state->hif_num_extgroup && hif_state->hif_ext_group[i]);
342 	     i++) {
343 		hif_ext_group = hif_state->hif_ext_group[i];
344 		for (j = 0; j < num_possible_cpus(); j++) {
345 			napi_stats = &hif_ext_group->stats[j];
346 			if (!napi_stats->napi_schedules)
347 				continue;
348 
349 			hif_get_poll_times_hist_str(napi_stats,
350 						    hist_str,
351 						    sizeof(hist_str));
352 			QDF_TRACE(QDF_MODULE_ID_HIF,
353 				  QDF_TRACE_LEVEL_INFO_HIGH,
354 				  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
355 				  i, j,
356 				  napi_stats->napi_schedules,
357 				  napi_stats->napi_polls,
358 				  napi_stats->napi_completes,
359 				  napi_stats->napi_workdone,
360 				  napi_stats->time_limit_reached,
361 				  qdf_do_div(napi_stats->napi_max_poll_time,
362 					     1000),
363 				  hist_str);
364 		}
365 	}
366 
367 	hif_print_napi_latency_stats(hif_state);
368 }
369 
370 qdf_export_symbol(hif_print_napi_stats);
371 #else
372 static inline
373 void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
374 				 uint8_t buf_len)
375 {
376 }
377 
378 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
379 {
380 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
381 	struct hif_exec_context *hif_ext_group;
382 	struct qca_napi_stat *napi_stats;
383 	int i, j;
384 
385 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
386 		"NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");
387 
388 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
389 		if (hif_state->hif_ext_group[i]) {
390 			hif_ext_group = hif_state->hif_ext_group[i];
391 			for (j = 0; j < num_possible_cpus(); j++) {
392 				napi_stats = &(hif_ext_group->stats[j]);
393 				if (napi_stats->napi_schedules != 0)
394 					QDF_TRACE(QDF_MODULE_ID_HIF,
395 						QDF_TRACE_LEVEL_FATAL,
396 						"NAPI[%2d]CPU[%d]: "
397 						"%7d %7d %7d %7d ",
398 						i, j,
399 						napi_stats->napi_schedules,
400 						napi_stats->napi_polls,
401 						napi_stats->napi_completes,
402 						napi_stats->napi_workdone);
403 			}
404 		}
405 	}
406 
407 	hif_print_napi_latency_stats(hif_state);
408 }
409 qdf_export_symbol(hif_print_napi_stats);
410 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
411 #endif /* QCA_WIFI_WCN6450 */
412 
413 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
414 /**
415  * hif_exec_fill_poll_time_histogram() - fills poll time histogram for a NAPI
416  * @hif_ext_group: hif_ext_group of type NAPI
417  *
418  * The function is called at the end of a NAPI poll to calculate poll time
419  * buckets.
420  *
421  * Return: void
422  */
423 static
424 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
425 {
426 	struct qca_napi_stat *napi_stat;
427 	unsigned long long poll_time_ns;
428 	uint32_t poll_time_us;
429 	uint32_t bucket_size_us = 500;
430 	uint32_t bucket;
431 	uint32_t cpu_id = qdf_get_cpu();
432 
433 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
434 	poll_time_us = qdf_do_div(poll_time_ns, 1000);
435 
436 	napi_stat = &hif_ext_group->stats[cpu_id];
437 	if (poll_time_ns > hif_ext_group->stats[cpu_id].napi_max_poll_time)
438 		hif_ext_group->stats[cpu_id].napi_max_poll_time = poll_time_ns;
439 
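	/*
	 * Illustrative example: with 500 us buckets, a poll that took 1.2 ms
	 * (poll_time_us = 1200) falls into bucket 1200 / 500 = 2; anything at
	 * or beyond the last bucket is clamped into QCA_NAPI_NUM_BUCKETS - 1.
	 */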
440 	bucket = poll_time_us / bucket_size_us;
441 	if (bucket >= QCA_NAPI_NUM_BUCKETS)
442 		bucket = QCA_NAPI_NUM_BUCKETS - 1;
443 	++napi_stat->poll_time_buckets[bucket];
444 }
445 
446 /**
447  * hif_exec_poll_should_yield() - Local function deciding if NAPI should yield
448  * @hif_ext_group: hif_ext_group of type NAPI
449  *
450  * Return: true if NAPI needs to yield, else false
451  */
452 static bool hif_exec_poll_should_yield(struct hif_exec_context *hif_ext_group)
453 {
454 	bool time_limit_reached = false;
455 	unsigned long long poll_time_ns;
456 	int cpu_id = qdf_get_cpu();
457 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
458 	struct hif_config_info *cfg = &scn->hif_config;
459 
460 	poll_time_ns = qdf_time_sched_clock() - hif_ext_group->poll_start_time;
461 	time_limit_reached =
462 		poll_time_ns > cfg->rx_softirq_max_yield_duration_ns ? 1 : 0;
463 
464 	if (time_limit_reached) {
465 		hif_ext_group->stats[cpu_id].time_limit_reached++;
466 		hif_ext_group->force_break = true;
467 	}
468 
469 	return time_limit_reached;
470 }
471 
472 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id)
473 {
474 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
475 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
476 	struct hif_exec_context *hif_ext_group;
477 	bool ret_val = false;
478 
479 	if (!(grp_id < hif_state->hif_num_extgroup) ||
480 	    !(grp_id < HIF_MAX_GROUP))
481 		return false;
482 
483 	hif_ext_group = hif_state->hif_ext_group[grp_id];
484 
485 	if (hif_ext_group->type == HIF_EXEC_NAPI_TYPE)
486 		ret_val = hif_exec_poll_should_yield(hif_ext_group);
487 
488 	return ret_val;
489 }
490 
491 /**
492  * hif_exec_update_service_start_time() - Update NAPI poll start time
493  * @hif_ext_group: hif_ext_group of type NAPI
494  *
495  * The function is called at the beginning of a NAPI poll to record the poll
496  * start time.
497  *
498  * Return: None
499  */
500 static inline
501 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
502 {
503 	hif_ext_group->poll_start_time = qdf_time_sched_clock();
504 }
505 
506 #else
507 static inline
508 void hif_exec_update_service_start_time(struct hif_exec_context *hif_ext_group)
509 {
510 }
511 
512 static inline
513 void hif_exec_fill_poll_time_histogram(struct hif_exec_context *hif_ext_group)
514 {
515 }
516 #endif /* WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT */
517 
518 static void hif_exec_tasklet_schedule(struct hif_exec_context *ctx)
519 {
520 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
521 
522 	tasklet_schedule(&t_ctx->tasklet);
523 }
524 
525 /**
526  * hif_exec_tasklet_fn() - grp tasklet
527  * @data: context
528  *
529  * Return: void
530  */
531 static void hif_exec_tasklet_fn(unsigned long data)
532 {
533 	struct hif_exec_context *hif_ext_group =
534 			(struct hif_exec_context *)data;
535 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
536 	unsigned int work_done;
537 	int cpu = smp_processor_id();
538 
539 	work_done =
540 		hif_ext_group->handler(hif_ext_group->context, HIF_MAX_BUDGET,
541 				       cpu);
542 
543 	if (hif_ext_group->work_complete(hif_ext_group, work_done)) {
544 		qdf_atomic_dec(&(scn->active_grp_tasklet_cnt));
545 		hif_ext_group->irq_enable(hif_ext_group);
546 	} else {
547 		hif_exec_tasklet_schedule(hif_ext_group);
548 	}
549 }
550 
551 /**
552  * hif_latency_profile_measure() - calculate latency and update histogram
553  * @hif_ext_group: hif exec context
554  *
555  * Return: None
556  */
557 #ifdef HIF_LATENCY_PROFILE_ENABLE
558 static void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
559 {
560 	int64_t cur_tstamp;
561 	int64_t time_elapsed;
562 
563 	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());
564 
565 	if (cur_tstamp > hif_ext_group->tstamp)
566 		time_elapsed = (cur_tstamp - hif_ext_group->tstamp);
567 	else
568 		time_elapsed = ~0x0 - (hif_ext_group->tstamp - cur_tstamp);
569 
570 	hif_ext_group->tstamp = cur_tstamp;
571 
572 	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
573 		hif_ext_group->sched_latency_stats[0]++;
574 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
575 		hif_ext_group->sched_latency_stats[1]++;
576 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
577 		hif_ext_group->sched_latency_stats[2]++;
578 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
579 		hif_ext_group->sched_latency_stats[3]++;
580 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
581 		hif_ext_group->sched_latency_stats[4]++;
582 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
583 		hif_ext_group->sched_latency_stats[5]++;
584 	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
585 		hif_ext_group->sched_latency_stats[6]++;
586 	else
587 		hif_ext_group->sched_latency_stats[7]++;
588 }
589 #else
590 static inline
591 void hif_latency_profile_measure(struct hif_exec_context *hif_ext_group)
592 {
593 }
594 #endif
595 
596 /**
597  * hif_latency_profile_start() - Update the start timestamp for HIF ext group
598  * @hif_ext_group: hif exec context
599  *
600  * Return: None
601  */
602 #ifdef HIF_LATENCY_PROFILE_ENABLE
603 static void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
604 {
605 	hif_ext_group->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
606 }
607 #else
608 static inline
609 void hif_latency_profile_start(struct hif_exec_context *hif_ext_group)
610 {
611 }
612 #endif
613 
614 #ifdef FEATURE_NAPI
615 #ifdef FEATURE_IRQ_AFFINITY
616 static inline int32_t
617 hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
618 {
619 	return qdf_atomic_inc_not_zero(&hif_ext_group->force_napi_complete);
620 }
621 #else
622 static inline int32_t
623 hif_is_force_napi_complete_required(struct hif_exec_context *hif_ext_group)
624 {
625 	return 0;
626 }
627 #endif
628 
629 /**
630  * hif_irq_disabled_time_limit_reached() - determine if irq disabled limit
631  * reached for single MSI
632  * @hif_ext_group: hif exec context
633  *
634  * Return: true if reached, else false.
635  */
636 static bool
637 hif_irq_disabled_time_limit_reached(struct hif_exec_context *hif_ext_group)
638 {
639 	unsigned long long irq_disabled_duration_ns;
640 
641 	if (hif_ext_group->type != HIF_EXEC_NAPI_TYPE)
642 		return false;
643 
644 	irq_disabled_duration_ns = qdf_time_sched_clock() -
645 					hif_ext_group->irq_disabled_start_time;
646 	if (irq_disabled_duration_ns >= IRQ_DISABLED_MAX_DURATION_NS) {
647 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
648 				 0, 0, 0, HIF_EVENT_IRQ_DISABLE_EXPIRED);
649 		return true;
650 	}
651 
652 	return false;
653 }
654 
655 /**
656  * hif_exec_poll() - napi poll
657  * @napi: napi struct
658  * @budget: budget for napi
659  *
660  * Return: mapping of internal budget to napi
661  */
662 static int hif_exec_poll(struct napi_struct *napi, int budget)
663 {
664 	struct hif_napi_exec_context *napi_exec_ctx =
665 		    qdf_container_of(napi, struct hif_napi_exec_context, napi);
666 	struct hif_exec_context *hif_ext_group = &napi_exec_ctx->exec_ctx;
667 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
668 	int work_done;
669 	int normalized_budget = 0;
670 	int actual_dones;
671 	int shift = hif_ext_group->scale_bin_shift;
672 	int cpu = smp_processor_id();
673 	bool force_complete = false;
674 
675 	hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
676 			 0, 0, 0, HIF_EVENT_BH_SCHED);
677 
678 	hif_ext_group->force_break = false;
679 	hif_exec_update_service_start_time(hif_ext_group);
680 
681 	if (budget)
682 		normalized_budget = NAPI_BUDGET_TO_INTERNAL_BUDGET(budget, shift);
683 
684 	hif_latency_profile_measure(hif_ext_group);
685 
686 	work_done = hif_ext_group->handler(hif_ext_group->context,
687 					   normalized_budget, cpu);
688 
689 	actual_dones = work_done;
690 
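	/*
	 * A forced completion (requested via the FEATURE_IRQ_AFFINITY path)
	 * clamps work_done below the budget: NAPI expects a poll that calls
	 * napi_complete() to return strictly less than its budget.
	 */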
691 	if (hif_is_force_napi_complete_required(hif_ext_group)) {
692 		force_complete = true;
693 		if (work_done >= normalized_budget)
694 			work_done = normalized_budget - 1;
695 	}
696 
697 	if (qdf_unlikely(force_complete) ||
698 	    (!hif_ext_group->force_break && work_done < normalized_budget) ||
699 	    ((pld_is_one_msi(scn->qdf_dev->dev) &&
700 	    hif_irq_disabled_time_limit_reached(hif_ext_group)))) {
701 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
702 				 0, 0, 0, HIF_EVENT_BH_COMPLETE);
703 		napi_complete(napi);
704 		qdf_atomic_dec(&scn->active_grp_tasklet_cnt);
705 		hif_ext_group->irq_enable(hif_ext_group);
706 		hif_ext_group->stats[cpu].napi_completes++;
707 	} else {
708 		/* if the ext_group supports time-based yield, claim full work
709 		 * done anyway */
710 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
711 				 0, 0, 0, HIF_EVENT_BH_FORCE_BREAK);
712 		work_done = normalized_budget;
713 	}
714 
715 	hif_ext_group->stats[cpu].napi_polls++;
716 	hif_ext_group->stats[cpu].napi_workdone += actual_dones;
717 
718 	/* map internal budget to NAPI budget */
719 	if (work_done)
720 		work_done = INTERNAL_BUDGET_TO_NAPI_BUDGET(work_done, shift);
721 
722 	hif_exec_fill_poll_time_histogram(hif_ext_group);
723 
724 	return work_done;
725 }
726 
727 /**
728  * hif_exec_napi_schedule() - schedule the napi exec instance
729  * @ctx: a hif_exec_context known to be of napi type
730  */
731 static void hif_exec_napi_schedule(struct hif_exec_context *ctx)
732 {
733 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
734 	ctx->stats[smp_processor_id()].napi_schedules++;
735 
736 	napi_schedule(&n_ctx->napi);
737 }
738 
739 /**
740  * hif_exec_napi_kill() - stop a napi exec context from being rescheduled
741  * @ctx: a hif_exec_context known to be of napi type
742  */
743 static void hif_exec_napi_kill(struct hif_exec_context *ctx)
744 {
745 	struct hif_napi_exec_context *n_ctx = hif_exec_get_napi(ctx);
746 	int irq_ind;
747 
748 	if (ctx->inited) {
749 		qdf_napi_disable(&n_ctx->napi);
750 		ctx->inited = 0;
751 	}
752 
753 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
754 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
755 
756 	hif_core_ctl_set_boost(false);
757 	qdf_netif_napi_del(&(n_ctx->napi));
758 }
759 
760 struct hif_execution_ops napi_sched_ops = {
761 	.schedule = &hif_exec_napi_schedule,
762 	.kill = &hif_exec_napi_kill,
763 };
764 
765 /**
766  * hif_exec_napi_create() - allocate and initialize a napi exec context
767  * @scale: a binary shift factor to map NAPI budget from/to internal
768  *         budget
769  */
770 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
771 {
772 	struct hif_napi_exec_context *ctx;
773 
774 	ctx = qdf_mem_malloc(sizeof(struct hif_napi_exec_context));
775 	if (!ctx)
776 		return NULL;
777 
778 	ctx->exec_ctx.sched_ops = &napi_sched_ops;
779 	ctx->exec_ctx.inited = true;
780 	ctx->exec_ctx.scale_bin_shift = scale;
781 	qdf_net_if_create_dummy_if((struct qdf_net_if *)&ctx->netdev);
782 	qdf_netif_napi_add(&(ctx->netdev), &(ctx->napi), hif_exec_poll,
783 			   QCA_NAPI_BUDGET);
784 	qdf_napi_enable(&ctx->napi);
785 
786 	return &ctx->exec_ctx;
787 }
788 #else
789 static struct hif_exec_context *hif_exec_napi_create(uint32_t scale)
790 {
791 	hif_warn("FEATURE_NAPI not defined, making tasklet");
792 	return hif_exec_tasklet_create();
793 }
794 #endif
795 
796 
797 /**
798  * hif_exec_tasklet_kill() - stop a tasklet exec context from being rescheduled
799  * @ctx: a hif_exec_context known to be of tasklet type
800  */
801 static void hif_exec_tasklet_kill(struct hif_exec_context *ctx)
802 {
803 	struct hif_tasklet_exec_context *t_ctx = hif_exec_get_tasklet(ctx);
804 	int irq_ind;
805 
806 	if (ctx->inited) {
807 		tasklet_disable(&t_ctx->tasklet);
808 		tasklet_kill(&t_ctx->tasklet);
809 	}
810 	ctx->inited = false;
811 
812 	for (irq_ind = 0; irq_ind < ctx->numirq; irq_ind++)
813 		hif_irq_affinity_remove(ctx->os_irq[irq_ind]);
814 }
815 
816 struct hif_execution_ops tasklet_sched_ops = {
817 	.schedule = &hif_exec_tasklet_schedule,
818 	.kill = &hif_exec_tasklet_kill,
819 };
820 
821 /**
822  * hif_exec_tasklet_create() -  allocate and initialize a tasklet exec context
823  */
824 static struct hif_exec_context *hif_exec_tasklet_create(void)
825 {
826 	struct hif_tasklet_exec_context *ctx;
827 
828 	ctx = qdf_mem_malloc(sizeof(struct hif_tasklet_exec_context));
829 	if (!ctx)
830 		return NULL;
831 
832 	ctx->exec_ctx.sched_ops = &tasklet_sched_ops;
833 	tasklet_init(&ctx->tasklet, hif_exec_tasklet_fn,
834 		     (unsigned long)ctx);
835 
836 	ctx->exec_ctx.inited = true;
837 
838 	return &ctx->exec_ctx;
839 }
840 
841 /**
842  * hif_exec_get_ctx() - retrieve an exec context based on an id
843  * @softc: the hif context owning the exec context
844  * @id: the id of the exec context
845  *
846  * mostly added to make it easier to rename or move the context array
847  */
848 struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *softc,
849 					  uint8_t id)
850 {
851 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
852 
853 	if (id < hif_state->hif_num_extgroup)
854 		return hif_state->hif_ext_group[id];
855 
856 	return NULL;
857 }
858 
859 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
860 				uint8_t id)
861 {
862 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(softc);
863 
864 	if (id < hif_state->hif_num_extgroup)
865 		return hif_state->hif_ext_group[id]->os_irq[0];
866 	return -EINVAL;
867 }
868 
869 qdf_export_symbol(hif_get_int_ctx_irq_num);
870 
871 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
872 {
873 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
874 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
875 	struct hif_exec_context *hif_ext_group;
876 	int i, status;
877 
878 	if (scn->ext_grp_irq_configured) {
879 		hif_err("Called after ext grp irq configured");
880 		return QDF_STATUS_E_FAILURE;
881 	}
882 
883 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
884 		hif_ext_group = hif_state->hif_ext_group[i];
885 		status = 0;
886 		qdf_spinlock_create(&hif_ext_group->irq_lock);
887 		if (hif_ext_group->configured &&
888 		    hif_ext_group->irq_requested == false) {
889 			hif_ext_group->irq_enabled = true;
890 			status = hif_grp_irq_configure(scn, hif_ext_group);
891 		}
892 		if (status != 0) {
893 			hif_err("Failed for group %d", i);
894 			hif_ext_group->irq_enabled = false;
895 		}
896 	}
897 
898 	scn->ext_grp_irq_configured = true;
899 
900 	return QDF_STATUS_SUCCESS;
901 }
902 
903 qdf_export_symbol(hif_configure_ext_group_interrupts);
904 
905 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx)
906 {
907 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
908 
909 	if (!scn || !scn->ext_grp_irq_configured) {
910 		hif_err("scn(%pK) is NULL or grp irq not configured", scn);
911 		return;
912 	}
913 
914 	hif_grp_irq_deconfigure(scn);
915 	scn->ext_grp_irq_configured = false;
916 }
917 
918 qdf_export_symbol(hif_deconfigure_ext_group_interrupts);
919 
920 #ifdef WLAN_SUSPEND_RESUME_TEST
921 /**
922  * hif_check_and_trigger_ut_resume() - check if unit-test command was used
923  *				       to trigger fake-suspend command, if yes
924  *				       then issue resume procedure.
925  * @scn: opaque HIF software context
926  *
927  * This API checks whether a unit-test command was used to trigger the
928  * fake-suspend command; if so, it triggers the resume procedure.
929  *
930  * Keep this API inline to save call overhead, and rely on branch prediction
931  * (the qdf_unlikely() hint) to minimize the performance impact.
932  *
933  * Return: void
934  */
935 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
936 {
937 	if (qdf_unlikely(hif_irq_trigger_ut_resume(scn)))
938 		hif_ut_fw_resume(scn);
939 }
940 #else
941 static inline void hif_check_and_trigger_ut_resume(struct hif_softc *scn)
942 {
943 }
944 #endif
945 
946 /**
947  * hif_check_and_trigger_sys_resume() - Check for bus suspend and
948  *  trigger system resume
949  * @scn: hif context
950  * @irq: irq number
951  *
952  * Return: None
953  */
954 static inline void
955 hif_check_and_trigger_sys_resume(struct hif_softc *scn, int irq)
956 {
957 	if (scn->bus_suspended && scn->linkstate_vote) {
958 		hif_info_rl("interrupt rcvd:%d trigger sys resume", irq);
959 		qdf_pm_system_wakeup();
960 	}
961 }
962 
963 /**
964  * hif_ext_group_interrupt_handler() - handler for related interrupts
965  * @irq: irq number of the interrupt
966  * @context: the associated hif_exec_group context
967  *
968  * This callback function takes care of disabling the associated interrupts
969  * and scheduling the expected bottom half for the exec_context.
970  * This callback function also helps keep track of the count running contexts.
971  */
972 irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context)
973 {
974 	struct hif_exec_context *hif_ext_group = context;
975 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
976 
977 	if (hif_ext_group->irq_requested) {
978 		hif_latency_profile_start(hif_ext_group);
979 
980 		hif_record_event(hif_ext_group->hif, hif_ext_group->grp_id,
981 				 0, 0, 0, HIF_EVENT_IRQ_TRIGGER);
982 
983 		hif_ext_group->irq_disable(hif_ext_group);
984 
985 		if (pld_is_one_msi(scn->qdf_dev->dev))
986 			hif_ext_group->irq_disabled_start_time =
987 							qdf_time_sched_clock();
988 		/*
989 		 * if private ioctl has issued fake suspend command to put
990 		 * FW in D0-WOW state then here is our chance to bring FW out
991 		 * of WOW mode.
992 		 *
993 		 * The reason why you need to explicitly wake-up the FW is here:
994 		 * APSS should have been in fully awake through-out when
995 		 * fake APSS suspend command was issued (to put FW in WOW mode)
996 		 * hence organic way of waking-up the FW
997 		 * (as part-of APSS-host wake-up) won't happen because
998 		 * in reality APSS didn't really suspend.
999 		 */
1000 		hif_check_and_trigger_ut_resume(scn);
1001 
1002 		hif_check_and_trigger_sys_resume(scn, irq);
1003 
1004 		qdf_atomic_inc(&scn->active_grp_tasklet_cnt);
1005 
1006 		hif_ext_group->sched_ops->schedule(hif_ext_group);
1007 	}
1008 
1009 	return IRQ_HANDLED;
1010 }
1011 
1012 /**
1013  * hif_exec_kill() - grp tasklet kill
1014  * @hif_ctx: hif_softc
1015  *
1016  * return: void
1017  */
1018 void hif_exec_kill(struct hif_opaque_softc *hif_ctx)
1019 {
1020 	int i;
1021 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_ctx);
1022 
1023 	for (i = 0; i < hif_state->hif_num_extgroup; i++)
1024 		hif_state->hif_ext_group[i]->sched_ops->kill(
1025 			hif_state->hif_ext_group[i]);
1026 
1027 	qdf_atomic_set(&hif_state->ol_sc.active_grp_tasklet_cnt, 0);
1028 }
1029 
1030 #ifdef FEATURE_IRQ_AFFINITY
1031 static inline void
1032 hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
1033 {
1034 	qdf_atomic_init(&hif_ext_group->force_napi_complete);
1035 }
1036 #else
1037 static inline void
1038 hif_init_force_napi_complete(struct hif_exec_context *hif_ext_group)
1039 {
1040 }
1041 #endif
1042 
1043 /**
1044  * hif_register_ext_group() - API to register external group
1045  * interrupt handler.
1046  * @hif_ctx: HIF Context
1047  * @numirq: number of irq's in the group
1048  * @irq: array of irq values
1049  * @handler: callback interrupt handler function
1050  * @cb_ctx: context to passed in callback
1051  * @context_name: context name
1052  * @type: napi vs tasklet
1053  * @scale: binary shift factor to map NAPI budget to internal budget
1054  *
1055  * Return: QDF_STATUS
1056  */
1057 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
1058 				  uint32_t numirq, uint32_t irq[],
1059 				  ext_intr_handler handler,
1060 				  void *cb_ctx, const char *context_name,
1061 				  enum hif_exec_type type, uint32_t scale)
1062 {
1063 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1064 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1065 	struct hif_exec_context *hif_ext_group;
1066 
1067 	if (scn->ext_grp_irq_configured) {
1068 		hif_err("Called after ext grp irq configured");
1069 		return QDF_STATUS_E_FAILURE;
1070 	}
1071 
1072 	if (hif_state->hif_num_extgroup >= HIF_MAX_GROUP) {
1073 		hif_err("Max groups: %d reached", hif_state->hif_num_extgroup);
1074 		return QDF_STATUS_E_FAILURE;
1075 	}
1076 
1077 	if (numirq >= HIF_MAX_GRP_IRQ) {
1078 		hif_err("Invalid numirq: %d", numirq);
1079 		return QDF_STATUS_E_FAILURE;
1080 	}
1081 
1082 	hif_ext_group = hif_exec_create(type, scale);
1083 	if (!hif_ext_group)
1084 		return QDF_STATUS_E_FAILURE;
1085 
1086 	hif_state->hif_ext_group[hif_state->hif_num_extgroup] =
1087 		hif_ext_group;
1088 
1089 	hif_ext_group->numirq = numirq;
1090 	qdf_mem_copy(&hif_ext_group->irq[0], irq, numirq * sizeof(irq[0]));
1091 	hif_ext_group->context = cb_ctx;
1092 	hif_ext_group->handler = handler;
1093 	hif_ext_group->configured = true;
1094 	hif_ext_group->grp_id = hif_state->hif_num_extgroup;
1095 	hif_ext_group->hif = hif_ctx;
1096 	hif_ext_group->context_name = context_name;
1097 	hif_ext_group->type = type;
1098 	hif_init_force_napi_complete(hif_ext_group);
1099 
1100 	hif_state->hif_num_extgroup++;
1101 	return QDF_STATUS_SUCCESS;
1102 }
1103 qdf_export_symbol(hif_register_ext_group);
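
/*
 * Usage sketch (illustrative only; the irq numbers, handler and context below
 * are hypothetical and not taken from this file):
 *
 *	uint32_t irqs[] = { 50, 51 };
 *	QDF_STATUS status;
 *
 *	status = hif_register_ext_group(hif_ctx, QDF_ARRAY_SIZE(irqs), irqs,
 *					my_grp_handler, my_cb_ctx, "dp_intr",
 *					HIF_EXEC_NAPI_TYPE, 0);
 *
 * Registration only records the group; the interrupts themselves are
 * requested later by hif_configure_ext_group_interrupts().
 */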
1104 
1105 /**
1106  * hif_exec_create() - create an execution context
1107  * @type: the type of execution context to create
1108  * @scale: binary shift factor to map NAPI budget to internal budget
1109  */
1110 struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
1111 						uint32_t scale)
1112 {
1113 	hif_debug("%s: create exec_type %d budget %d",
1114 		  __func__, type, QCA_NAPI_BUDGET * scale);
1115 
1116 	switch (type) {
1117 	case HIF_EXEC_NAPI_TYPE:
1118 		return hif_exec_napi_create(scale);
1119 
1120 	case HIF_EXEC_TASKLET_TYPE:
1121 		return hif_exec_tasklet_create();
1122 	default:
1123 		return NULL;
1124 	}
1125 }
1126 
1127 /**
1128  * hif_exec_destroy() - free the hif_exec context
1129  * @ctx: context to free
1130  *
1131  * please kill the context before freeing it to avoid a use after free.
1132  */
1133 void hif_exec_destroy(struct hif_exec_context *ctx)
1134 {
1135 	struct hif_softc *scn = HIF_GET_SOFTC(ctx->hif);
1136 
1137 	if (scn->ext_grp_irq_configured)
1138 		qdf_spinlock_destroy(&ctx->irq_lock);
1139 	qdf_mem_free(ctx);
1140 }
1141 
1142 /**
1143  * hif_deregister_exec_group() - API to free the exec contexts
1144  * @hif_ctx: HIF context
1145  * @context_name: name of the module whose contexts need to be deregistered
1146  *
1147  * This function deregisters the contexts of the requestor identified
1148  * based on the context_name & frees the memory.
1149  *
1150  * Return: void
1151  */
1152 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
1153 				const char *context_name)
1154 {
1155 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1156 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
1157 	struct hif_exec_context *hif_ext_group;
1158 	int i;
1159 
1160 	for (i = 0; i < HIF_MAX_GROUP; i++) {
1161 		hif_ext_group = hif_state->hif_ext_group[i];
1162 
1163 		if (!hif_ext_group)
1164 			continue;
1165 
1166 		hif_debug("%s: Deregistering grp id %d name %s",
1167 			  __func__,
1168 			  hif_ext_group->grp_id,
1169 			  hif_ext_group->context_name);
1170 
1171 		if (strcmp(hif_ext_group->context_name, context_name) == 0) {
1172 			hif_ext_group->sched_ops->kill(hif_ext_group);
1173 			hif_state->hif_ext_group[i] = NULL;
1174 			hif_exec_destroy(hif_ext_group);
1175 			hif_state->hif_num_extgroup--;
1176 		}
1177 
1178 	}
1179 }
1180 qdf_export_symbol(hif_deregister_exec_group);
1181 
1182 #ifdef DP_UMAC_HW_RESET_SUPPORT
1183 /**
1184  * hif_umac_reset_handler_tasklet() - Tasklet for UMAC HW reset interrupt
1185  * @data: UMAC HW reset HIF context
1186  *
1187  * return: void
1188  */
1189 static void hif_umac_reset_handler_tasklet(unsigned long data)
1190 {
1191 	struct hif_umac_reset_ctx *umac_reset_ctx =
1192 		(struct hif_umac_reset_ctx *)data;
1193 
1194 	/* call the callback handler */
1195 	umac_reset_ctx->cb_handler(umac_reset_ctx->cb_ctx);
1196 }
1197 
1198 /**
1199  * hif_umac_reset_irq_handler() - Interrupt service routine of UMAC HW reset
1200  * @irq: irq coming from kernel
1201  * @ctx: UMAC HW reset HIF context
1202  *
1203  * return: IRQ_HANDLED if success, else IRQ_NONE
1204  */
1205 static irqreturn_t hif_umac_reset_irq_handler(int irq, void *ctx)
1206 {
1207 	struct hif_umac_reset_ctx *umac_reset_ctx = ctx;
1208 
1209 	/* Schedule the tasklet if it is umac reset interrupt and exit */
1210 	if (umac_reset_ctx->irq_handler(umac_reset_ctx->cb_ctx))
1211 		tasklet_hi_schedule(&umac_reset_ctx->intr_tq);
1212 
1213 	return IRQ_HANDLED;
1214 }
1215 
1216 QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
1217 				  int *umac_reset_irq)
1218 {
1219 	int ret;
1220 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1221 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
1222 	struct platform_device *pdev = (struct platform_device *)sc->pdev;
1223 
1224 	ret = pfrm_get_irq(&pdev->dev, (struct qdf_pfm_hndl *)pdev,
1225 			   "umac_reset", 0, umac_reset_irq);
1226 
1227 	if (ret) {
1228 		hif_err("umac reset get irq failed ret %d", ret);
1229 		return QDF_STATUS_E_FAILURE;
1230 	}
1231 	return QDF_STATUS_SUCCESS;
1232 }
1233 
1234 qdf_export_symbol(hif_get_umac_reset_irq);
1235 
1236 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
1237 					   bool (*irq_handler)(void *cb_ctx),
1238 					   int (*tl_handler)(void *cb_ctx),
1239 					   void *cb_ctx, int irq)
1240 {
1241 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1242 	struct hif_umac_reset_ctx *umac_reset_ctx;
1243 	int ret;
1244 
1245 	if (!hif_sc) {
1246 		hif_err("scn is null");
1247 		return QDF_STATUS_E_NULL_VALUE;
1248 	}
1249 
1250 	umac_reset_ctx = &hif_sc->umac_reset_ctx;
1251 
1252 	umac_reset_ctx->irq_handler = irq_handler;
1253 	umac_reset_ctx->cb_handler = tl_handler;
1254 	umac_reset_ctx->cb_ctx = cb_ctx;
1255 	umac_reset_ctx->os_irq = irq;
1256 
1257 	/* Init the tasklet */
1258 	tasklet_init(&umac_reset_ctx->intr_tq,
1259 		     hif_umac_reset_handler_tasklet,
1260 		     (unsigned long)umac_reset_ctx);
1261 
1262 	/* Register the interrupt handler */
1263 	ret  = pfrm_request_irq(hif_sc->qdf_dev->dev, irq,
1264 				hif_umac_reset_irq_handler,
1265 				IRQF_NO_SUSPEND,
1266 				"umac_hw_reset_irq",
1267 				umac_reset_ctx);
1268 	if (ret) {
1269 		hif_err("request_irq failed: %d", ret);
1270 		return qdf_status_from_os_return(ret);
1271 	}
1272 
1273 	umac_reset_ctx->irq_configured = true;
1274 
1275 	return QDF_STATUS_SUCCESS;
1276 }
1277 
1278 qdf_export_symbol(hif_register_umac_reset_handler);
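
/*
 * Typical registration flow (sketch; error handling trimmed, the callback
 * names and context are hypothetical):
 *
 *	int irq;
 *
 *	if (QDF_IS_STATUS_SUCCESS(hif_get_umac_reset_irq(hif_scn, &irq)))
 *		hif_register_umac_reset_handler(hif_scn, my_umac_irq_check,
 *						my_umac_reset_work, my_cb_ctx,
 *						irq);
 *
 * The irq_handler callback runs in hard-irq context and only decides whether
 * the interrupt really is a UMAC reset; the actual reset work then runs from
 * the tasklet via the tl_handler callback.
 */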
1279 
1280 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
1281 {
1282 	struct hif_softc *hif_sc = HIF_GET_SOFTC(hif_scn);
1283 	struct hif_umac_reset_ctx *umac_reset_ctx;
1284 	int ret;
1285 
1286 	if (!hif_sc) {
1287 		hif_err("scn is null");
1288 		return QDF_STATUS_E_NULL_VALUE;
1289 	}
1290 
1291 	umac_reset_ctx = &hif_sc->umac_reset_ctx;
1292 	if (!umac_reset_ctx->irq_configured) {
1293 		hif_err("unregister called without a prior IRQ configuration");
1294 		return QDF_STATUS_E_FAILURE;
1295 	}
1296 
1297 	ret  = pfrm_free_irq(hif_sc->qdf_dev->dev,
1298 			     umac_reset_ctx->os_irq,
1299 			     umac_reset_ctx);
1300 	if (ret) {
1301 		hif_err("free_irq failed: %d", ret);
1302 		return qdf_status_from_os_return(ret);
1303 	}
1304 	umac_reset_ctx->irq_configured = false;
1305 
1306 	tasklet_disable(&umac_reset_ctx->intr_tq);
1307 	tasklet_kill(&umac_reset_ctx->intr_tq);
1308 
1309 	umac_reset_ctx->cb_handler = NULL;
1310 	umac_reset_ctx->cb_ctx = NULL;
1311 
1312 	return QDF_STATUS_SUCCESS;
1313 }
1314 
1315 qdf_export_symbol(hif_unregister_umac_reset_handler);
1316 #endif
1317