/*
 * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <scheduler_core.h>
#include <qdf_atomic.h>
#include "qdf_flex_mem.h"

static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;

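/*
 * Flexible memory pool used by scheduler_core_msg_dup() to make a private
 * copy of every message before it is queued.
 */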
DEFINE_QDF_FLEX_MEM_POOL(sched_pool, sizeof(struct scheduler_msg),
			 WLAN_SCHED_REDUCTION_LIMIT);

#ifdef WLAN_SCHED_HISTORY_SIZE

#define SCHEDULER_HISTORY_HEADER "|Callback                               "\
				 "|Message Type"			   \
				 "|Queue Duration(us)|Queue Depth"	   \
				 "|Run Duration(us)|"

#define SCHEDULER_HISTORY_LINE "--------------------------------------" \
			       "--------------------------------------" \
			       "--------------------------------------"

/**
 * struct sched_history_item - metrics for a scheduler message
 * @callback: the message's execution callback
 * @type_id: the message's type id
 * @queue_id: id of the queue the message was added to
 * @queue_start_us: timestamp when the message was queued, in microseconds
 * @queue_duration_us: duration the message spent queued, in microseconds
 * @queue_depth: depth of the queue when the message was queued
 * @run_start_us: timestamp when the message started execution, in microseconds
 * @run_duration_us: duration of the message's execution, in microseconds
 */
struct sched_history_item {
	void *callback;
	uint32_t type_id;
	QDF_MODULE_ID queue_id;
	uint64_t queue_start_us;
	uint32_t queue_duration_us;
	uint32_t queue_depth;
	uint64_t run_start_us;
	uint32_t run_duration_us;
};

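/* circular history buffer; sched_history_index is the next slot to record */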
static struct sched_history_item sched_history[WLAN_SCHED_HISTORY_SIZE];
static uint32_t sched_history_index;

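/**
 * sched_history_queue() - record queue-time metrics on @msg at enqueue
 * @queue: destination message queue
 * @msg: message being enqueued
 */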
static void sched_history_queue(struct scheduler_mq_type *queue,
				struct scheduler_msg *msg)
{
	msg->queue_id = queue->qid;
	msg->queue_depth = qdf_list_size(&queue->mq_list);
	msg->queued_at_us = qdf_get_log_timestamp_usecs();
}

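/**
 * sched_history_start() - open a history entry as @msg begins execution
 * @msg: message about to be dispatched to its callback
 */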
static void sched_history_start(struct scheduler_msg *msg)
{
	uint64_t started_at_us = qdf_get_log_timestamp_usecs();
	struct sched_history_item hist = {
		.callback = msg->callback,
		.type_id = msg->type,
		.queue_start_us = msg->queued_at_us,
		.queue_duration_us = started_at_us - msg->queued_at_us,
		.queue_depth = msg->queue_depth,
		.run_start_us = started_at_us,
	};

	sched_history[sched_history_index] = hist;
}

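/**
 * sched_history_stop() - close the current history entry and advance the index
 */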
static void sched_history_stop(void)
{
	struct sched_history_item *hist = &sched_history[sched_history_index];
	uint64_t stopped_at_us = qdf_get_log_timestamp_usecs();

	hist->run_duration_us = stopped_at_us - hist->run_start_us;

	sched_history_index++;
	sched_history_index %= WLAN_SCHED_HISTORY_SIZE;
}

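/* dump the scheduler history ring via fatal-level logs, oldest entry first */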
void sched_history_print(void)
{
	struct sched_history_item *history, *item;
	uint32_t history_idx;
	uint32_t idx, index;

	history = qdf_mem_malloc(sizeof(*history) * WLAN_SCHED_HISTORY_SIZE);

	if (!history) {
		sched_err("Mem alloc failed");
		return;
	}

	qdf_mem_copy(history, &sched_history,
		     (sizeof(*history) * WLAN_SCHED_HISTORY_SIZE));
	history_idx = sched_history_index;

	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);
	sched_nofl_fatal(SCHEDULER_HISTORY_HEADER);
	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);

	for (idx = 0; idx < WLAN_SCHED_HISTORY_SIZE; idx++) {
		index = (history_idx + idx) % WLAN_SCHED_HISTORY_SIZE;
		item = history + index;

		if (!item->callback)
			continue;

		sched_nofl_fatal("%40pF|%12d|%18d|%11d|%16d|",
				 item->callback, item->type_id,
				 item->queue_duration_us,
				 item->queue_depth,
				 item->run_duration_us);
	}

	sched_nofl_fatal(SCHEDULER_HISTORY_LINE);

	qdf_mem_free(history);
}
#else /* WLAN_SCHED_HISTORY_SIZE */

static inline void sched_history_queue(struct scheduler_mq_type *queue,
				       struct scheduler_msg *msg) { }
static inline void sched_history_start(struct scheduler_msg *msg) { }
static inline void sched_history_stop(void) { }
void sched_history_print(void) { }

#endif /* WLAN_SCHED_HISTORY_SIZE */

QDF_STATUS scheduler_create_ctx(void)
{
	qdf_flex_mem_init(&sched_pool);
	gp_sched_ctx = &g_sched_ctx;

	return QDF_STATUS_SUCCESS;
}

QDF_STATUS scheduler_destroy_ctx(void)
{
	gp_sched_ctx = NULL;
	qdf_flex_mem_deinit(&sched_pool);

	return QDF_STATUS_SUCCESS;
}

struct scheduler_ctx *scheduler_get_context(void)
{
	QDF_BUG(gp_sched_ctx);

	return gp_sched_ctx;
}

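/**
 * scheduler_mq_init() - initialize a single scheduler message queue
 * @msg_q: queue to initialize
 *
 * Return: QDF_STATUS_SUCCESS
 */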
static QDF_STATUS scheduler_mq_init(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_spinlock_create(&msg_q->mq_lock);
	qdf_list_create(&msg_q->mq_list, SCHEDULER_CORE_MAX_MESSAGES);

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

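/**
 * scheduler_mq_deinit() - de-initialize a single scheduler message queue
 * @msg_q: queue to de-initialize
 */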
static void scheduler_mq_deinit(struct scheduler_mq_type *msg_q)
{
	sched_enter();

	qdf_list_destroy(&msg_q->mq_list);
	qdf_spinlock_destroy(&msg_q->mq_lock);

	sched_exit();
}

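/*
 * __sched_queue_depth tracks how many duplicated messages are outstanding;
 * __sched_dup_fail_count counts consecutive duplication failures.
 */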
static qdf_atomic_t __sched_queue_depth;
static qdf_atomic_t __sched_dup_fail_count;

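/**
 * scheduler_all_queues_init() - initialize all scheduler message queues and
 *	reset the qid-to-qidx map to invalid values
 * @sched_ctx: scheduler context
 *
 * Return: QDF_STATUS of operation
 */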
static QDF_STATUS scheduler_all_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	qdf_atomic_set(&__sched_queue_depth, 0);

	/* Initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		status = scheduler_mq_init(&sched_ctx->queue_ctx.sch_msg_q[i]);
		if (QDF_STATUS_SUCCESS != status)
			return status;
	}

	/* Initialize all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return status;
}

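/**
 * scheduler_all_queues_deinit() - de-initialize all scheduler message queues
 *	and reset the qid-to-qidx map to invalid values
 * @sched_ctx: scheduler context
 *
 * Return: QDF_STATUS of operation
 */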
static QDF_STATUS scheduler_all_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	int i;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	/* De-initialize all message queues */
	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++)
		scheduler_mq_deinit(&sched_ctx->queue_ctx.sch_msg_q[i]);

	/* Reset all qid to qidx mappings to invalid values */
	for (i = 0; i < QDF_MODULE_ID_MAX; i++)
		sched_ctx->queue_ctx.scheduler_msg_qid_to_qidx[i] =
					SCHEDULER_NUMBER_OF_MSG_QUEUE;

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

void scheduler_mq_put(struct scheduler_mq_type *msg_q,
		      struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_back(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

void scheduler_mq_put_front(struct scheduler_mq_type *msg_q,
			    struct scheduler_msg *msg)
{
	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	sched_history_queue(msg_q, msg);
	qdf_list_insert_front(&msg_q->mq_list, &msg->node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);
}

struct scheduler_msg *scheduler_mq_get(struct scheduler_mq_type *msg_q)
{
	QDF_STATUS status;
	qdf_list_node_t *node;

	qdf_spin_lock_irqsave(&msg_q->mq_lock);
	status = qdf_list_remove_front(&msg_q->mq_list, &node);
	qdf_spin_unlock_irqrestore(&msg_q->mq_lock);

	if (QDF_IS_STATUS_ERROR(status))
		return NULL;

	return qdf_container_of(node, struct scheduler_msg, node);
}

QDF_STATUS scheduler_queues_deinit(struct scheduler_ctx *sched_ctx)
{
	return scheduler_all_queues_deinit(sched_ctx);
}

QDF_STATUS scheduler_queues_init(struct scheduler_ctx *sched_ctx)
{
	QDF_STATUS status;

	sched_enter();

	QDF_BUG(sched_ctx);
	if (!sched_ctx)
		return QDF_STATUS_E_FAILURE;

	status = scheduler_all_queues_init(sched_ctx);
	if (QDF_IS_STATUS_ERROR(status)) {
		scheduler_all_queues_deinit(sched_ctx);
		sched_err("Failed to initialize the msg queues");
		return status;
	}

	sched_debug("Queue init passed");

	sched_exit();

	return QDF_STATUS_SUCCESS;
}

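/*
 * Duplicate @msg into a pool-allocated copy owned by the scheduler. Fails if
 * the scheduler already holds SCHEDULER_CORE_MAX_MESSAGES messages; more than
 * SCHEDULER_WRAPPER_MAX_FAIL_COUNT consecutive failures trigger a debug panic.
 */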
struct scheduler_msg *scheduler_core_msg_dup(struct scheduler_msg *msg)
{
	struct scheduler_msg *dup;

	if (qdf_atomic_inc_return(&__sched_queue_depth) >
	    SCHEDULER_CORE_MAX_MESSAGES)
		goto buffer_full;

	dup = qdf_flex_mem_alloc(&sched_pool);
	if (!dup) {
		sched_err("out of memory");
		goto dec_queue_count;
	}

	qdf_mem_copy(dup, msg, sizeof(*dup));

	qdf_atomic_set(&__sched_dup_fail_count, 0);

	return dup;

buffer_full:
	if (qdf_atomic_inc_return(&__sched_dup_fail_count) >
	    SCHEDULER_WRAPPER_MAX_FAIL_COUNT)
		QDF_DEBUG_PANIC("Scheduler buffer is full");

dec_queue_count:
	qdf_atomic_dec(&__sched_queue_depth);

	return NULL;
}

void scheduler_core_msg_free(struct scheduler_msg *msg)
{
	qdf_flex_mem_free(&sched_pool, msg);
	qdf_atomic_dec(&__sched_queue_depth);
}

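/**
 * scheduler_thread_process_queues() - drain all message queues in priority
 *	order until they are empty or a shutdown/suspend event is raised
 * @sch_ctx: scheduler context
 * @shutdown: set to true if the thread was signaled to shut down
 */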
static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
					    bool *shutdown)
{
	int i;
	QDF_STATUS status;
	struct scheduler_msg *msg;

	if (!sch_ctx) {
		QDF_DEBUG_PANIC("sch_ctx is null");
		return;
	}

	/* start with the highest priority queue: the timer queue at index 0 */
	i = 0;
	while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
		/* Check if MC needs to shut down */
		if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
					&sch_ctx->sch_event_flag)) {
			sched_debug("scheduler thread signaled to shutdown");
			*shutdown = true;

			/* Check for any suspend indication */
			if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag)) {
				/* Unblock anyone waiting on suspend */
				if (gp_sched_ctx->hdd_callback)
					gp_sched_ctx->hdd_callback();
			}

			break;
		}

		msg = scheduler_mq_get(&sch_ctx->queue_ctx.sch_msg_q[i]);
		if (!msg) {
			/* check next queue */
			i++;
			continue;
		}

		if (sch_ctx->queue_ctx.scheduler_msg_process_fn[i]) {
			sch_ctx->watchdog_msg_type = msg->type;
			sch_ctx->watchdog_callback = msg->callback;

			sched_history_start(msg);
			qdf_timer_start(&sch_ctx->watchdog_timer,
					sch_ctx->timeout);
			status = sch_ctx->queue_ctx.
					scheduler_msg_process_fn[i](msg);
			qdf_timer_stop(&sch_ctx->watchdog_timer);
			sched_history_stop();

			if (QDF_IS_STATUS_ERROR(status))
				sched_err("Failed processing Qid[%d] message",
					  sch_ctx->queue_ctx.sch_msg_q[i].qid);

			scheduler_core_msg_free(msg);
		}

		/* start again with the highest priority queue at index 0 */
		i = 0;
	}

	/* Check for any suspend indication */
	if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
			&sch_ctx->sch_event_flag)) {
		qdf_spin_lock(&sch_ctx->sch_thread_lock);
		qdf_event_reset(&sch_ctx->resume_sch_event);
		/* controller thread suspend completion callback */
		if (gp_sched_ctx->hdd_callback)
			gp_sched_ctx->hdd_callback();
		qdf_spin_unlock(&sch_ctx->sch_thread_lock);
		/* Wait for resume indication */
		qdf_wait_single_event(&sch_ctx->resume_sch_event, 0);
	}

	/* nothing left to process; go back to waiting on the wait queue */
	return;
}

int scheduler_thread(void *arg)
{
	struct scheduler_ctx *sch_ctx = (struct scheduler_ctx *)arg;
	int retWaitStatus = 0;
	bool shutdown = false;

	if (!arg) {
		QDF_DEBUG_PANIC("arg is null");
		return 0;
	}
	qdf_set_user_nice(current, -2);

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	qdf_event_set(&sch_ctx->sch_start_event);
	sched_debug("scheduler thread %d (%s) starting up",
		    current->pid, current->comm);

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus = qdf_wait_queue_interruptible(
					sch_ctx->sch_wait_queue,
					qdf_atomic_test_bit(MC_POST_EVENT_MASK,
						&sch_ctx->sch_event_flag) ||
					qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
						&sch_ctx->sch_event_flag));

		if (retWaitStatus == -ERESTARTSYS)
			QDF_DEBUG_PANIC("Scheduler received -ERESTARTSYS");

		qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
		scheduler_thread_process_queues(sch_ctx, &shutdown);
	}

	/* If we get here the scheduler thread must exit */
	sched_debug("Scheduler thread exiting");
	qdf_event_set(&sch_ctx->sch_shutdown);

	return 0;
}

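/**
 * scheduler_flush_single_queue() - drop all pending messages from @mq,
 *	invoking each message's flush callback (or freeing its bodyptr) first
 * @mq: message queue to flush
 */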
static void scheduler_flush_single_queue(struct scheduler_mq_type *mq)
{
	struct scheduler_msg *msg;
	QDF_STATUS (*flush_cb)(struct scheduler_msg *);

	while ((msg = scheduler_mq_get(mq))) {
		if (msg->flush_callback) {
			sched_debug("Calling flush callback; type: %x",
				    msg->type);
			flush_cb = msg->flush_callback;
			flush_cb(msg);
		} else if (msg->bodyptr) {
			sched_debug("Freeing scheduler msg bodyptr; type: %x",
				    msg->type);
			qdf_mem_free(msg->bodyptr);
		}

		scheduler_core_msg_free(msg);
	}
}

void scheduler_queues_flush(struct scheduler_ctx *sched_ctx)
{
	struct scheduler_mq_type *mq;
	int i;

	sched_debug("Flushing scheduler message queues");

	for (i = 0; i < SCHEDULER_NUMBER_OF_MSG_QUEUE; i++) {
		mq = &sched_ctx->queue_ctx.sch_msg_q[i];
		scheduler_flush_single_queue(mq);
	}
}