1 /*
2  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 /**
21  *  DOC: CDS Scheduler Implementation
22  */
23 
24 #include <cds_api.h>
25 #include <ani_global.h>
26 #include <sir_types.h>
27 #include <qdf_types.h>
28 #include <lim_api.h>
29 #include <sme_api.h>
30 #include <wlan_qct_sys.h>
31 #include "cds_sched.h"
32 #include <wlan_hdd_power.h>
33 #include "wma_types.h"
34 #include <linux/spinlock.h>
35 #include <linux/kthread.h>
36 #include <linux/cpu.h>
37 #ifdef RX_PERFORMANCE
38 #include <linux/sched/types.h>
39 #endif
40 #include "wlan_dp_ucfg_api.h"
41 
42 /*
43  * The following commit was introduced in v5.17:
44  * cead18552660 ("exit: Rename complete_and_exit to kthread_complete_and_exit")
45  * Use the old name for kernels before 5.17
46  */
47 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
48 /**
49  * kthread_complete_and_exit - signal a completion and exit the current thread
50  * @c: completion to signal before exiting
51  * @s: exit code
52  */
53 #define kthread_complete_and_exit(c, s) complete_and_exit(c, s)
54 #endif
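
/*
 * Illustrative sketch (not compiled into the driver): the usage pattern the
 * compatibility macro above enables. A kernel thread signals a completion as
 * its very last action so that a waiter can synchronize on its exit; this is
 * how cds_ol_rx_thread() pairs with cds_close_rx_thread() later in this file.
 * The thread function and argument names here are hypothetical.
 */
#if 0
static int example_thread_fn(void *arg)
{
	struct completion *done = arg;

	/* ... process work until asked to shut down ... */

	/* Signal the waiter and terminate; this call does not return */
	kthread_complete_and_exit(done, 0);
}
#endif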
55 
56 static spinlock_t ssr_protect_lock;
57 
58 struct shutdown_notifier {
59 	struct list_head list;
60 	void (*cb)(void *priv);
61 	void *priv;
62 };
63 
64 struct list_head shutdown_notifier_head;
65 
66 enum notifier_state {
67 	NOTIFIER_STATE_NONE,
68 	NOTIFIER_STATE_NOTIFYING,
69 } notifier_state;
70 
71 static p_cds_sched_context gp_cds_sched_context;
72 
73 #ifdef WLAN_DP_LEGACY_OL_RX_THREAD
74 static int cds_ol_rx_thread(void *arg);
75 static uint32_t affine_cpu;
76 static QDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);
77 
78 #define CDS_CORE_PER_CLUSTER (4)
79 /* Maximum of 2 clusters supported */
80 #define CDS_MAX_CPU_CLUSTERS 2
81 
82 #define CDS_CPU_CLUSTER_TYPE_LITTLE 0
83 #define CDS_CPU_CLUSTER_TYPE_PERF 1
84 
85 static inline
86 int cds_set_cpus_allowed_ptr_with_cpu(struct task_struct *task,
87 				      unsigned long cpu)
88 {
89 	return set_cpus_allowed_ptr(task, cpumask_of(cpu));
90 }
91 
92 static inline
93 int cds_set_cpus_allowed_ptr_with_mask(struct task_struct *task,
94 				       qdf_cpu_mask *new_mask)
95 {
96 	return set_cpus_allowed_ptr(task, new_mask);
97 }
98 
99 void cds_set_rx_thread_cpu_mask(uint8_t cpu_affinity_mask)
100 {
101 	p_cds_sched_context sched_context = get_cds_sched_ctxt();
102 
103 	if (!sched_context) {
104 		qdf_err("invalid context");
105 		return;
106 	}
107 	sched_context->conf_rx_thread_cpu_mask = cpu_affinity_mask;
108 }
109 
110 void cds_set_rx_thread_ul_cpu_mask(uint8_t cpu_affinity_mask)
111 {
112 	p_cds_sched_context sched_context = get_cds_sched_ctxt();
113 
114 	if (!sched_context) {
115 		qdf_err("invalid context");
116 		return;
117 	}
118 	sched_context->conf_rx_thread_ul_affinity = cpu_affinity_mask;
119 }
120 
121 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
122 /**
123  * cds_rx_thread_log_cpu_affinity_change() - Log Rx thread affinity change
124  * @core_affine_cnt: Available cores
125  * @tput_req: Throughput request
126  * @old_mask: Old affinity mask
127  * @new_mask: New affinity mask
128  *
129  * Return: NONE
130  */
131 static void cds_rx_thread_log_cpu_affinity_change(unsigned char core_affine_cnt,
132 						  int tput_req,
133 						  struct cpumask *old_mask,
134 						  struct cpumask *new_mask)
135 {
136 	char new_mask_str[10];
137 	char old_mask_str[10];
138 
139 	qdf_mem_zero(new_mask_str, sizeof(new_mask_str));
140 	qdf_mem_zero(old_mask_str, sizeof(old_mask_str));
141 
142 	cpumap_print_to_pagebuf(false, old_mask_str, old_mask);
143 	cpumap_print_to_pagebuf(false, new_mask_str, new_mask);
144 
145 	cds_debug("num online cores %d, high tput req %d, Rx_thread old mask %s new mask %s",
146 		  core_affine_cnt, tput_req, old_mask_str, new_mask_str);
147 }
148 #else
149 static void cds_rx_thread_log_cpu_affinity_change(unsigned char core_affine_cnt,
150 						  int tput_req,
151 						  struct cpumask *old_mask,
152 						  struct cpumask *new_mask)
153 {
154 }
155 #endif
156 
157 /**
158  * cds_sched_find_attach_cpu - find available cores and attach the Rx thread
159  * @pSchedContext:	wlan scheduler context
160  * @high_throughput:	whether high throughput is required
161  *
162  * Find the currently online cores.
163  * During high TPUT:
164  * 1) If the user configured cores via INI, affine to those cores.
165  * 2) Otherwise, affine to the perf cluster cores.
166  * 3) Otherwise, affine to all cores.
167  *
168  * During low TPUT, allow any core and let the scheduler decide.
169  *
170  * Return: 0 on success
171  *         1 on failure
172  */
173 static int cds_sched_find_attach_cpu(p_cds_sched_context pSchedContext,
174 	bool high_throughput)
175 {
176 	unsigned char core_affine_count = 0;
177 	qdf_cpu_mask new_mask;
178 	unsigned long cpus;
179 	struct cds_config_info *cds_cfg;
180 
181 	cds_debug("num possible cpu %d", num_possible_cpus());
182 
183 	qdf_cpumask_clear(&new_mask);
184 
185 	if (high_throughput) {
186 		/* Get Online perf/pwr CPU count */
187 		for_each_online_cpu(cpus) {
188 			if (topology_physical_package_id(cpus) >
189 							CDS_MAX_CPU_CLUSTERS) {
190 				cds_err("can handle max %d clusters, returning...",
191 					CDS_MAX_CPU_CLUSTERS);
192 				goto err;
193 			}
194 
195 			if (pSchedContext->conf_rx_thread_cpu_mask) {
196 				if (pSchedContext->conf_rx_thread_cpu_mask &
197 								(1 << cpus))
198 					qdf_cpumask_set_cpu(cpus, &new_mask);
199 			} else if (topology_physical_package_id(cpus) ==
200 						 CDS_CPU_CLUSTER_TYPE_PERF) {
201 				qdf_cpumask_set_cpu(cpus, &new_mask);
202 			}
203 
204 			core_affine_count++;
205 		}
206 	} else {
207 		/* Attach to all cores, let scheduler decide */
208 		qdf_cpumask_setall(&new_mask);
209 	}
210 
211 	cds_rx_thread_log_cpu_affinity_change(core_affine_count,
212 				(int)pSchedContext->high_throughput_required,
213 				&pSchedContext->rx_thread_cpu_mask,
214 				&new_mask);
215 
216 	if (!cpumask_equal(&pSchedContext->rx_thread_cpu_mask, &new_mask)) {
217 		cds_cfg = cds_get_ini_config();
218 		cpumask_copy(&pSchedContext->rx_thread_cpu_mask, &new_mask);
219 		if (cds_cfg && cds_cfg->enable_dp_rx_threads)
220 			ucfg_dp_txrx_set_cpu_mask(cds_get_context(QDF_MODULE_ID_SOC),
221 						  &new_mask);
222 		else
223 			cds_set_cpus_allowed_ptr_with_mask(pSchedContext->ol_rx_thread,
224 							   &new_mask);
225 	}
226 
227 	return 0;
228 err:
229 	return 1;
230 }
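
/*
 * Illustrative sketch (not compiled into the driver): how the uint8_t INI
 * bitmask consumed above maps onto a cpumask. Bit N of the mask selects
 * CPU N, mirroring the "mask & (1 << cpu)" test in
 * cds_sched_find_attach_cpu(). The helper name is hypothetical.
 */
#if 0
static void example_ini_mask_to_cpumask(uint8_t ini_mask,
					qdf_cpu_mask *out_mask)
{
	unsigned long cpu;

	qdf_cpumask_clear(out_mask);
	for_each_online_cpu(cpu) {
		/* Only 8 CPUs are addressable with a uint8_t mask */
		if (cpu < 8 * sizeof(ini_mask) && (ini_mask & (1 << cpu)))
			qdf_cpumask_set_cpu(cpu, out_mask);
	}
}
#endif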
231 
232 int cds_sched_handle_cpu_hot_plug(void)
233 {
234 	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();
235 
236 	if (!pSchedContext) {
237 		cds_err("invalid context");
238 		return 1;
239 	}
240 
241 	if (cds_is_load_or_unload_in_progress())
242 		return 0;
243 
244 	mutex_lock(&pSchedContext->affinity_lock);
245 	if (cds_sched_find_attach_cpu(pSchedContext,
246 		pSchedContext->high_throughput_required)) {
247 		cds_err("handle hot plug fail");
248 		mutex_unlock(&pSchedContext->affinity_lock);
249 		return 1;
250 	}
251 	mutex_unlock(&pSchedContext->affinity_lock);
252 	return 0;
253 }
254 
255 void cds_sched_handle_rx_thread_affinity_req(bool high_throughput)
256 {
257 	p_cds_sched_context pschedcontext = get_cds_sched_ctxt();
258 	unsigned long cpus;
259 	qdf_cpu_mask new_mask;
260 	unsigned char core_affine_count = 0;
261 
262 	if (!pschedcontext || !pschedcontext->ol_rx_thread)
263 		return;
264 
265 	if (cds_is_load_or_unload_in_progress()) {
266 		cds_err("load or unload in progress");
267 		return;
268 	}
269 
270 	if (pschedcontext->rx_affinity_required == high_throughput)
271 		return;
272 
273 	pschedcontext->rx_affinity_required = high_throughput;
274 	qdf_cpumask_clear(&new_mask);
275 	if (!high_throughput) {
276 		/* Attach to all cores, let scheduler decide */
277 		qdf_cpumask_setall(&new_mask);
278 		goto affine_thread;
279 	}
280 	for_each_online_cpu(cpus) {
281 		if (topology_physical_package_id(cpus) >
282 		    CDS_MAX_CPU_CLUSTERS) {
283 			cds_err("can handle max %d clusters ",
284 				CDS_MAX_CPU_CLUSTERS);
285 			return;
286 		}
287 		if (pschedcontext->conf_rx_thread_ul_affinity &&
288 		    (pschedcontext->conf_rx_thread_ul_affinity &
289 				 (1 << cpus)))
290 			qdf_cpumask_set_cpu(cpus, &new_mask);
291 
292 		core_affine_count++;
293 	}
294 
295 affine_thread:
296 	cds_rx_thread_log_cpu_affinity_change(
297 		core_affine_count,
298 		(int)pschedcontext->rx_affinity_required,
299 		&pschedcontext->rx_thread_cpu_mask,
300 		&new_mask);
301 
302 	mutex_lock(&pschedcontext->affinity_lock);
303 	if (!cpumask_equal(&pschedcontext->rx_thread_cpu_mask, &new_mask)) {
304 		cpumask_copy(&pschedcontext->rx_thread_cpu_mask, &new_mask);
305 		cds_set_cpus_allowed_ptr_with_mask(pschedcontext->ol_rx_thread,
306 						   &new_mask);
307 	}
308 	mutex_unlock(&pschedcontext->affinity_lock);
309 }
310 
311 int cds_sched_handle_throughput_req(bool high_tput_required)
312 {
313 	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();
314 
315 	if (!pSchedContext) {
316 		cds_err("invalid context");
317 		return 1;
318 	}
319 
320 	if (cds_is_load_or_unload_in_progress()) {
321 		cds_err("load or unload in progress");
322 		return 0;
323 	}
324 
325 	mutex_lock(&pSchedContext->affinity_lock);
326 	if (pSchedContext->high_throughput_required != high_tput_required) {
327 		pSchedContext->high_throughput_required = high_tput_required;
328 		if (cds_sched_find_attach_cpu(pSchedContext,
329 					      high_tput_required)) {
330 			mutex_unlock(&pSchedContext->affinity_lock);
331 			return 1;
332 		}
333 	}
334 	mutex_unlock(&pSchedContext->affinity_lock);
335 	return 0;
336 }
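
/*
 * Illustrative sketch (not compiled into the driver): how a datapath
 * throughput monitor might drive the two affinity hooks above. The trigger
 * and threshold values are hypothetical; in the driver the real trigger
 * lives in the DP/HDD bus-bandwidth logic.
 */
#if 0
#define EXAMPLE_RX_HIGH_TPUT_PPS 20000	/* made-up threshold */
#define EXAMPLE_TX_HIGH_TPUT_PPS 20000	/* made-up threshold */

static void example_tput_level_changed(uint32_t rx_pps, uint32_t tx_pps)
{
	bool high_rx_tput = rx_pps > EXAMPLE_RX_HIGH_TPUT_PPS;
	bool high_ul_tput = tx_pps > EXAMPLE_TX_HIGH_TPUT_PPS;

	/* Re-affine the Rx thread to perf/INI cores when Rx load is high */
	cds_sched_handle_throughput_req(high_rx_tput);

	/* Optionally pin the Rx thread for heavy uplink (TCP ACK) traffic */
	cds_sched_handle_rx_thread_affinity_req(high_ul_tput);
}
#endif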
337 
338 /**
339  * cds_cpu_hotplug_multi_cluster() - call the multi-cluster hotplug handler
340  *	when running on a multi-cluster platform
341  *
342  * Return: QDF_STATUS
343  */
344 static QDF_STATUS cds_cpu_hotplug_multi_cluster(void)
345 {
346 	int cpus;
347 	unsigned int multi_cluster = 0;
348 
349 	for_each_online_cpu(cpus) {
350 		multi_cluster = topology_physical_package_id(cpus);
351 	}
352 
353 	if (!multi_cluster)
354 		return QDF_STATUS_E_NOSUPPORT;
355 
356 	if (cds_sched_handle_cpu_hot_plug())
357 		return QDF_STATUS_E_FAILURE;
358 
359 	return QDF_STATUS_SUCCESS;
360 }
361 
362 /**
363  * __cds_cpu_hotplug_notify() - CPU hotplug event handler
364  * @cpu: CPU Id of the CPU generating the event
365  * @cpu_up: true if the CPU is online
366  *
367  * Return: None
368  */
369 static void __cds_cpu_hotplug_notify(uint32_t cpu, bool cpu_up)
370 {
371 	unsigned long pref_cpu = 0;
372 	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();
373 	int i;
374 
375 	if (!pSchedContext || !pSchedContext->ol_rx_thread)
376 		return;
377 
378 	if (cds_is_load_or_unload_in_progress() || cds_is_driver_recovering())
379 		return;
380 
381 	cds_debug("'%s' event on CPU %u (of %d); Currently affine to CPU %u",
382 		  cpu_up ? "Up" : "Down", cpu, num_possible_cpus(), affine_cpu);
383 
384 	/* try multi-cluster scheduling first */
385 	if (QDF_IS_STATUS_SUCCESS(cds_cpu_hotplug_multi_cluster()))
386 		return;
387 
388 	if (cpu_up) {
389 		if (affine_cpu != 0)
390 			return;
391 
392 		for_each_online_cpu(i) {
393 			if (i == 0)
394 				continue;
395 			pref_cpu = i;
396 			break;
397 		}
398 	} else {
399 		if (cpu != affine_cpu)
400 			return;
401 
402 		affine_cpu = 0;
403 		for_each_online_cpu(i) {
404 			if (i == 0)
405 				continue;
406 			pref_cpu = i;
407 			break;
408 		}
409 	}
410 
411 	if (pref_cpu == 0)
412 		return;
413 
414 	if (pSchedContext->ol_rx_thread &&
415 	    !cds_set_cpus_allowed_ptr_with_cpu(pSchedContext->ol_rx_thread,
416 					       pref_cpu))
417 		affine_cpu = pref_cpu;
418 }
419 
420 /**
421  * cds_cpu_hotplug_notify() - cpu core up/down notification handler wrapper
422  * @cpu: CPU Id of the CPU generating the event
423  * @cpu_up: true if the CPU is online
424  *
425  * Return: None
426  */
427 static void cds_cpu_hotplug_notify(uint32_t cpu, bool cpu_up)
428 {
429 	struct qdf_op_sync *op_sync;
430 
431 	if (qdf_op_protect(&op_sync))
432 		return;
433 
434 	__cds_cpu_hotplug_notify(cpu, cpu_up);
435 
436 	qdf_op_unprotect(op_sync);
437 }
438 
439 static void cds_cpu_online_cb(void *context, uint32_t cpu)
440 {
441 	cds_cpu_hotplug_notify(cpu, true);
442 }
443 
444 static void cds_cpu_before_offline_cb(void *context, uint32_t cpu)
445 {
446 	cds_cpu_hotplug_notify(cpu, false);
447 }
448 #endif /* WLAN_DP_LEGACY_OL_RX_THREAD */
449 
450 QDF_STATUS cds_sched_open(void *p_cds_context,
451 			  p_cds_sched_context pSchedContext,
452 			  uint32_t SchedCtxSize)
453 {
454 	cds_debug("Opening the CDS Scheduler");
455 	/* Sanity checks */
456 	if ((!p_cds_context) || (!pSchedContext)) {
457 		cds_err("Null params being passed");
458 		return QDF_STATUS_E_FAILURE;
459 	}
460 	if (sizeof(cds_sched_context) != SchedCtxSize) {
461 		cds_debug("Incorrect CDS Sched Context size passed");
462 		return QDF_STATUS_E_INVAL;
463 	}
464 	qdf_mem_zero(pSchedContext, sizeof(cds_sched_context));
465 #ifdef WLAN_DP_LEGACY_OL_RX_THREAD
466 	spin_lock_init(&pSchedContext->ol_rx_thread_lock);
467 	init_waitqueue_head(&pSchedContext->ol_rx_wait_queue);
468 	init_completion(&pSchedContext->ol_rx_start_event);
469 	init_completion(&pSchedContext->ol_suspend_rx_event);
470 	init_completion(&pSchedContext->ol_resume_rx_event);
471 	init_completion(&pSchedContext->ol_rx_shutdown);
472 	pSchedContext->ol_rx_event_flag = 0;
473 	spin_lock_init(&pSchedContext->ol_rx_queue_lock);
474 	spin_lock_init(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
475 	INIT_LIST_HEAD(&pSchedContext->ol_rx_thread_queue);
476 	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
477 	INIT_LIST_HEAD(&pSchedContext->cds_ol_rx_pkt_freeq);
478 	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
479 	if (cds_alloc_ol_rx_pkt_freeq(pSchedContext) != QDF_STATUS_SUCCESS)
480 		goto pkt_freeqalloc_failure;
481 	qdf_cpuhp_register(&pSchedContext->cpuhp_event_handle,
482 			   NULL,
483 			   cds_cpu_online_cb,
484 			   cds_cpu_before_offline_cb);
485 	mutex_init(&pSchedContext->affinity_lock);
486 	pSchedContext->high_throughput_required = false;
487 	pSchedContext->rx_affinity_required = false;
488 	pSchedContext->active_staid = OL_TXRX_INVALID_LOCAL_PEER_ID;
489 #endif
490 	gp_cds_sched_context = pSchedContext;
491 
492 #ifdef WLAN_DP_LEGACY_OL_RX_THREAD
493 	pSchedContext->ol_rx_thread = kthread_create(cds_ol_rx_thread,
494 						       pSchedContext,
495 						       "cds_ol_rx_thread");
496 	if (IS_ERR(pSchedContext->ol_rx_thread)) {
497 
498 		cds_alert("Could not Create CDS OL RX Thread");
499 		goto OL_RX_THREAD_START_FAILURE;
500 
501 	}
502 	wake_up_process(pSchedContext->ol_rx_thread);
503 	cds_debug("CDS OL RX thread Created");
504 	wait_for_completion_interruptible(&pSchedContext->ol_rx_start_event);
505 	cds_debug("CDS OL Rx Thread has started");
506 #endif
507 	/* We're good now: Let's get the ball rolling!!! */
508 	cds_debug("CDS Scheduler successfully Opened");
509 	return QDF_STATUS_SUCCESS;
510 #ifdef WLAN_DP_LEGACY_OL_RX_THREAD
511 OL_RX_THREAD_START_FAILURE:
512 	qdf_cpuhp_unregister(&pSchedContext->cpuhp_event_handle);
513 	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
514 pkt_freeqalloc_failure:
515 #endif
516 	gp_cds_sched_context = NULL;
517 
518 	return QDF_STATUS_E_RESOURCES;
519 
520 } /* cds_sched_open() */
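
/*
 * Illustrative sketch (not compiled into the driver): the open/close
 * lifecycle expected by cds_sched_open()/cds_sched_close(). The scheduler
 * context is owned by the caller (in the driver it is embedded in the
 * global CDS context); passing the explicit size lets cds_sched_open()
 * reject a mismatched structure. Names here are hypothetical.
 */
#if 0
static cds_sched_context example_sched_ctx;

static QDF_STATUS example_cds_init(void *cds_context)
{
	QDF_STATUS status;

	status = cds_sched_open(cds_context, &example_sched_ctx,
				sizeof(cds_sched_context));
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* ... bring up the rest of CDS ... */

	return QDF_STATUS_SUCCESS;
}

static void example_cds_deinit(void)
{
	cds_sched_close();
}
#endif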
521 
522 #ifdef WLAN_DP_LEGACY_OL_RX_THREAD
523 void cds_free_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext)
524 {
525 	struct cds_ol_rx_pkt *pkt;
526 
527 	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
528 	while (!list_empty(&pSchedContext->cds_ol_rx_pkt_freeq)) {
529 		pkt = list_entry((&pSchedContext->cds_ol_rx_pkt_freeq)->next,
530 			typeof(*pkt), list);
531 		list_del(&pkt->list);
532 		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
533 		qdf_mem_free(pkt);
534 		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
535 	}
536 	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
537 }
538 
539 /**
540  * cds_alloc_ol_rx_pkt_freeq() - Function to allocate free buffer queue
541  * @pSchedContext: pointer to the global CDS Sched Context
542  *
543  * This API allocates CDS_MAX_OL_RX_PKT cds message buffers, which are
544  * used for Rx data processing.
545  *
546  * Return: status of memory allocation
547  */
548 static QDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext)
549 {
550 	struct cds_ol_rx_pkt *pkt, *tmp;
551 	int i;
552 
553 	for (i = 0; i < CDS_MAX_OL_RX_PKT; i++) {
554 		pkt = qdf_mem_malloc(sizeof(*pkt));
555 		if (!pkt) {
556 			cds_err("Vos packet allocation for ol rx thread failed");
557 			goto free;
558 		}
559 		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
560 		list_add_tail(&pkt->list, &pSchedContext->cds_ol_rx_pkt_freeq);
561 		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
562 	}
563 
564 	return QDF_STATUS_SUCCESS;
565 
566 free:
567 	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
568 	list_for_each_entry_safe(pkt, tmp, &pSchedContext->cds_ol_rx_pkt_freeq,
569 				 list) {
570 		list_del(&pkt->list);
571 		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
572 		qdf_mem_free(pkt);
573 		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
574 	}
575 	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
576 	return QDF_STATUS_E_NOMEM;
577 }
578 
579 void
580 cds_free_ol_rx_pkt(p_cds_sched_context pSchedContext,
581 		    struct cds_ol_rx_pkt *pkt)
582 {
583 	memset(pkt, 0, sizeof(*pkt));
584 	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
585 	list_add_tail(&pkt->list, &pSchedContext->cds_ol_rx_pkt_freeq);
586 	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
587 }
588 
589 struct cds_ol_rx_pkt *cds_alloc_ol_rx_pkt(p_cds_sched_context pSchedContext)
590 {
591 	struct cds_ol_rx_pkt *pkt;
592 
593 	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
594 	if (list_empty(&pSchedContext->cds_ol_rx_pkt_freeq)) {
595 		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
596 		return NULL;
597 	}
598 	pkt = list_first_entry(&pSchedContext->cds_ol_rx_pkt_freeq,
599 			       struct cds_ol_rx_pkt, list);
600 	list_del(&pkt->list);
601 	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
602 	return pkt;
603 }
604 
605 void
606 cds_indicate_rxpkt(p_cds_sched_context pSchedContext,
607 		   struct cds_ol_rx_pkt *pkt)
608 {
609 	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
610 	list_add_tail(&pkt->list, &pSchedContext->ol_rx_thread_queue);
611 	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
612 	set_bit(RX_POST_EVENT, &pSchedContext->ol_rx_event_flag);
613 	wake_up_interruptible(&pSchedContext->ol_rx_wait_queue);
614 }
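
/*
 * Illustrative sketch (not compiled into the driver): the producer side of
 * the Rx thread hand-off. A caller in the Rx path takes a container from
 * the free queue, fills it and queues it to the thread; cds_ol_rx_thread()
 * later invokes the callback and returns the container via
 * cds_free_ol_rx_pkt(). The wrapper name and callback prototype here are
 * hypothetical.
 */
#if 0
static QDF_STATUS example_enqueue_rx(p_cds_sched_context sched_ctx,
				     qdf_nbuf_t rx_buf_list, uint16_t sta_id,
				     void (*cb)(void *ctx, qdf_nbuf_t buf,
						uint16_t staid),
				     void *cb_ctx)
{
	struct cds_ol_rx_pkt *pkt;

	pkt = cds_alloc_ol_rx_pkt(sched_ctx);
	if (!pkt)
		return QDF_STATUS_E_NOMEM;	/* free queue exhausted */

	pkt->callback = cb;
	pkt->context = cb_ctx;
	pkt->Rxpkt = rx_buf_list;
	pkt->staId = sta_id;

	/* Queue the packet and wake the Rx thread */
	cds_indicate_rxpkt(sched_ctx, pkt);

	return QDF_STATUS_SUCCESS;
}
#endif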
615 
616 QDF_STATUS cds_close_rx_thread(void)
617 {
618 	cds_debug("invoked");
619 
620 	if (!gp_cds_sched_context) {
621 		cds_err("!gp_cds_sched_context");
622 		return QDF_STATUS_E_FAILURE;
623 	}
624 
625 	if (!gp_cds_sched_context->ol_rx_thread)
626 		return QDF_STATUS_SUCCESS;
627 
628 	/* Shut down Tlshim Rx thread */
629 	set_bit(RX_SHUTDOWN_EVENT, &gp_cds_sched_context->ol_rx_event_flag);
630 	set_bit(RX_POST_EVENT, &gp_cds_sched_context->ol_rx_event_flag);
631 	wake_up_interruptible(&gp_cds_sched_context->ol_rx_wait_queue);
632 	wait_for_completion(&gp_cds_sched_context->ol_rx_shutdown);
633 	gp_cds_sched_context->ol_rx_thread = NULL;
634 	cds_drop_rxpkt_by_staid(gp_cds_sched_context, WLAN_MAX_STA_COUNT);
635 	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
636 	qdf_cpuhp_unregister(&gp_cds_sched_context->cpuhp_event_handle);
637 
638 	return QDF_STATUS_SUCCESS;
639 } /* cds_close_rx_thread */
640 
641 void cds_drop_rxpkt_by_staid(p_cds_sched_context pSchedContext, uint16_t staId)
642 {
643 	struct list_head local_list;
644 	struct cds_ol_rx_pkt *pkt, *tmp;
645 	qdf_nbuf_t buf, next_buf;
646 	uint32_t timeout = 0;
647 
648 	INIT_LIST_HEAD(&local_list);
649 	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
650 	if (list_empty(&pSchedContext->ol_rx_thread_queue)) {
651 		spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
652 		return;
653 	}
654 	list_for_each_entry_safe(pkt, tmp, &pSchedContext->ol_rx_thread_queue,
655 								list) {
656 		if (pkt->staId == staId || staId == WLAN_MAX_STA_COUNT)
657 			list_move_tail(&pkt->list, &local_list);
658 	}
659 	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
660 
661 	list_for_each_entry_safe(pkt, tmp, &local_list, list) {
662 		list_del(&pkt->list);
663 		buf = pkt->Rxpkt;
664 		while (buf) {
665 			next_buf = qdf_nbuf_queue_next(buf);
666 			qdf_nbuf_free(buf);
667 			buf = next_buf;
668 		}
669 		cds_free_ol_rx_pkt(pSchedContext, pkt);
670 	}
671 
672 	while (pSchedContext->active_staid == staId &&
673 	       timeout <= CDS_ACTIVE_STAID_CLEANUP_TIMEOUT) {
674 		if (qdf_in_interrupt())
675 			qdf_mdelay(CDS_ACTIVE_STAID_CLEANUP_DELAY);
676 		else
677 			qdf_sleep(CDS_ACTIVE_STAID_CLEANUP_DELAY);
678 		timeout += CDS_ACTIVE_STAID_CLEANUP_DELAY;
679 	}
680 
681 	if (pSchedContext->active_staid == staId)
682 		cds_err("Failed to cleanup RX packets for staId:%u", staId);
683 }
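
/*
 * Illustrative sketch (not compiled into the driver): a station teardown
 * path flushing any Rx packets still queued to the thread for that peer
 * before the peer object is freed. The wrapper name and the source of
 * sta_id are hypothetical.
 */
#if 0
static void example_peer_cleanup(uint16_t sta_id)
{
	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();

	if (!sched_ctx)
		return;

	/* Drop every queued Rx packet that belongs to this station */
	cds_drop_rxpkt_by_staid(sched_ctx, sta_id);
}
#endif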
684 
685 /**
686  * cds_rx_from_queue() - function to process pending Rx packets
687  * @pSchedContext: Pointer to the global CDS Sched Context
688  *
689  * This API traverses the pending buffer list and invokes the callback for
690  * each packet; the callback essentially delivers the packet to HDD.
691  *
692  * Return: none
693  */
694 static void cds_rx_from_queue(p_cds_sched_context pSchedContext)
695 {
696 	struct cds_ol_rx_pkt *pkt;
697 	uint16_t sta_id;
698 
699 	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
700 	while (!list_empty(&pSchedContext->ol_rx_thread_queue)) {
701 		pkt = list_first_entry(&pSchedContext->ol_rx_thread_queue,
702 				       struct cds_ol_rx_pkt, list);
703 		list_del(&pkt->list);
704 		pSchedContext->active_staid = pkt->staId;
705 		spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
706 		sta_id = pkt->staId;
707 		pkt->callback(pkt->context, pkt->Rxpkt, sta_id);
708 		cds_free_ol_rx_pkt(pSchedContext, pkt);
709 		spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
710 		pSchedContext->active_staid = OL_TXRX_INVALID_LOCAL_PEER_ID;
711 	}
712 	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
713 }
714 
715 /**
716  * cds_ol_rx_thread() - cds main tlshim rx thread
717  * @arg: pointer to the global CDS Sched Context
718  *
719  * This API is the thread handler for Tlshim data packet processing.
720  *
721  * Return: thread exit code
722  */
723 static int cds_ol_rx_thread(void *arg)
724 {
725 	p_cds_sched_context pSchedContext = (p_cds_sched_context) arg;
726 	bool shutdown = false;
727 	int status;
728 
729 #ifdef RX_THREAD_PRIORITY
730 	struct sched_param scheduler_params = {0};
731 
732 	scheduler_params.sched_priority = 1;
733 	sched_setscheduler(current, SCHED_FIFO, &scheduler_params);
734 #else
735 	set_user_nice(current, -1);
736 #endif
737 
738 	qdf_set_wake_up_idle(true);
739 
740 	complete(&pSchedContext->ol_rx_start_event);
741 
742 	while (!shutdown) {
743 		status =
744 			wait_event_interruptible(pSchedContext->ol_rx_wait_queue,
745 						 test_bit(RX_POST_EVENT,
746 							  &pSchedContext->ol_rx_event_flag)
747 						 || test_bit(RX_SUSPEND_EVENT,
748 							     &pSchedContext->ol_rx_event_flag));
749 		if (status == -ERESTARTSYS)
750 			break;
751 
752 		clear_bit(RX_POST_EVENT, &pSchedContext->ol_rx_event_flag);
753 		while (true) {
754 			if (test_bit(RX_SHUTDOWN_EVENT,
755 				     &pSchedContext->ol_rx_event_flag)) {
756 				clear_bit(RX_SHUTDOWN_EVENT,
757 					  &pSchedContext->ol_rx_event_flag);
758 				if (test_bit(RX_SUSPEND_EVENT,
759 					     &pSchedContext->ol_rx_event_flag)) {
760 					clear_bit(RX_SUSPEND_EVENT,
761 						  &pSchedContext->ol_rx_event_flag);
762 					complete
763 						(&pSchedContext->ol_suspend_rx_event);
764 				}
765 				cds_debug("Shutting down OL RX Thread");
766 				shutdown = true;
767 				break;
768 			}
769 			cds_rx_from_queue(pSchedContext);
770 
771 			if (test_bit(RX_SUSPEND_EVENT,
772 				     &pSchedContext->ol_rx_event_flag)) {
773 				clear_bit(RX_SUSPEND_EVENT,
774 					  &pSchedContext->ol_rx_event_flag);
775 				spin_lock(&pSchedContext->ol_rx_thread_lock);
776 				INIT_COMPLETION
777 					(pSchedContext->ol_resume_rx_event);
778 				complete(&pSchedContext->ol_suspend_rx_event);
779 				spin_unlock(&pSchedContext->ol_rx_thread_lock);
780 				wait_for_completion_interruptible
781 					(&pSchedContext->ol_resume_rx_event);
782 			}
783 			break;
784 		}
785 	}
786 
787 	cds_debug("Exiting CDS OL rx thread");
788 	kthread_complete_and_exit(&pSchedContext->ol_rx_shutdown, 0);
789 
790 	return 0;
791 }
792 
793 void cds_resume_rx_thread(void)
794 {
795 	p_cds_sched_context cds_sched_context;
796 
797 	cds_sched_context = get_cds_sched_ctxt();
798 	if (!cds_sched_context) {
799 		cds_err("cds_sched_context is NULL");
800 		return;
801 	}
802 
803 	complete(&cds_sched_context->ol_resume_rx_event);
804 }
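
/*
 * Illustrative sketch (not compiled into the driver): the caller side of the
 * suspend/resume handshake implemented by cds_ol_rx_thread() and
 * cds_resume_rx_thread(). The real suspend helper lives outside this file;
 * the function name and timeout handling here are hypothetical.
 */
#if 0
static QDF_STATUS example_suspend_rx_thread(unsigned long timeout_ms)
{
	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();

	if (!sched_ctx || !sched_ctx->ol_rx_thread)
		return QDF_STATUS_E_INVAL;

	/* Ask the Rx thread to park itself on ol_resume_rx_event */
	set_bit(RX_SUSPEND_EVENT, &sched_ctx->ol_rx_event_flag);
	wake_up_interruptible(&sched_ctx->ol_rx_wait_queue);

	/* Wait until the thread acknowledges the suspend request */
	if (!wait_for_completion_timeout(&sched_ctx->ol_suspend_rx_event,
					 msecs_to_jiffies(timeout_ms)))
		return QDF_STATUS_E_TIMEOUT;

	/* Later, cds_resume_rx_thread() completes ol_resume_rx_event */
	return QDF_STATUS_SUCCESS;
}
#endif
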
805 #endif
806 
807 QDF_STATUS cds_sched_close(void)
808 {
809 	cds_debug("invoked");
810 
811 	if (!gp_cds_sched_context) {
812 		cds_err("!gp_cds_sched_context");
813 		return QDF_STATUS_E_FAILURE;
814 	}
815 
816 	cds_close_rx_thread();
817 
818 	gp_cds_sched_context = NULL;
819 	return QDF_STATUS_SUCCESS;
820 } /* cds_sched_close() */
821 
822 p_cds_sched_context get_cds_sched_ctxt(void)
823 {
824 	/* Make sure the CDS scheduler context has been initialized */
825 	if (!gp_cds_sched_context)
826 		cds_err("!gp_cds_sched_context");
827 
828 	return gp_cds_sched_context;
829 }
830 
831 void cds_ssr_protect_init(void)
832 {
833 	spin_lock_init(&ssr_protect_lock);
834 	INIT_LIST_HEAD(&shutdown_notifier_head);
835 }
836 
837 QDF_STATUS cds_shutdown_notifier_register(void (*cb)(void *priv), void *priv)
838 {
839 	struct shutdown_notifier *notifier;
840 	unsigned long irq_flags;
841 
842 	notifier = qdf_mem_malloc(sizeof(*notifier));
843 
844 	if (!notifier)
845 		return QDF_STATUS_E_NOMEM;
846 
847 	/*
848 	 * This logic could be simplified if a separate state were maintained
849 	 * for shutdown and reinit. Right now there is only a recovery-in-progress
850 	 * state, and checking against it does not help because some modules may
851 	 * need to register their callbacks during reinit.
852 	 * For now this logic is added to prevent notifier registration while
853 	 * the registered callbacks are being invoked with the notification.
854 	 */
855 	spin_lock_irqsave(&ssr_protect_lock, irq_flags);
856 	if (notifier_state == NOTIFIER_STATE_NOTIFYING) {
857 		spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
858 		qdf_mem_free(notifier);
859 		return QDF_STATUS_E_INVAL;
860 	}
861 
862 	notifier->cb = cb;
863 	notifier->priv = priv;
864 
865 	list_add_tail(&notifier->list, &shutdown_notifier_head);
866 	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
867 
868 	return QDF_STATUS_SUCCESS;
869 }
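
/*
 * Illustrative sketch (not compiled into the driver): a module registering
 * a shutdown notifier so a pending operation is unblocked when SSR/shutdown
 * starts. The module context and callback names are hypothetical.
 */
#if 0
struct example_module_ctx {
	struct completion pending_op_done;
};

static void example_shutdown_cb(void *priv)
{
	struct example_module_ctx *ctx = priv;

	/* Unblock anyone waiting on a firmware response that won't arrive */
	complete(&ctx->pending_op_done);
}

static QDF_STATUS example_module_start(struct example_module_ctx *ctx)
{
	init_completion(&ctx->pending_op_done);

	return cds_shutdown_notifier_register(example_shutdown_cb, ctx);
}
#endif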
870 
871 void cds_shutdown_notifier_purge(void)
872 {
873 	struct shutdown_notifier *notifier, *temp;
874 	unsigned long irq_flags;
875 
876 	spin_lock_irqsave(&ssr_protect_lock, irq_flags);
877 	list_for_each_entry_safe(notifier, temp,
878 				 &shutdown_notifier_head, list) {
879 		list_del(&notifier->list);
880 		spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
881 
882 		qdf_mem_free(notifier);
883 
884 		spin_lock_irqsave(&ssr_protect_lock, irq_flags);
885 	}
886 
887 	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
888 }
889 
890 void cds_shutdown_notifier_call(void)
891 {
892 	struct shutdown_notifier *notifier;
893 	unsigned long irq_flags;
894 
895 	spin_lock_irqsave(&ssr_protect_lock, irq_flags);
896 	notifier_state = NOTIFIER_STATE_NOTIFYING;
897 
898 	list_for_each_entry(notifier, &shutdown_notifier_head, list) {
899 		spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
900 
901 		notifier->cb(notifier->priv);
902 
903 		spin_lock_irqsave(&ssr_protect_lock, irq_flags);
904 	}
905 
906 	notifier_state = NOTIFIER_STATE_NONE;
907 	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
908 }
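
/*
 * Illustrative sketch (not compiled into the driver): how a recovery path
 * might use the two notifier APIs above. On shutdown the callbacks are
 * invoked to unblock waiters; once teardown completes the list is purged so
 * stale entries are not carried into reinit. The wrapper name is
 * hypothetical.
 */
#if 0
static void example_ssr_shutdown(void)
{
	/* Wake every registered waiter so the shutdown cannot stall */
	cds_shutdown_notifier_call();

	/* ... tear down the driver state ... */

	/* Drop all notifier entries before the driver is reinitialized */
	cds_shutdown_notifier_purge();
}
#endif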
909 
910 int cds_get_gfp_flags(void)
911 {
912 	int flags = GFP_KERNEL;
913 
914 	if (in_interrupt() || in_atomic() || irqs_disabled())
915 		flags = GFP_ATOMIC;
916 
917 	return flags;
918 }
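
/*
 * Illustrative sketch (not compiled into the driver): using
 * cds_get_gfp_flags() to pick GFP_ATOMIC vs GFP_KERNEL based on the calling
 * context. The structure and function names are hypothetical.
 */
#if 0
struct example_event {
	uint32_t id;
	uint8_t payload[];
};

static struct example_event *example_alloc_event(size_t payload_len)
{
	/* Uses an atomic allocation if called from IRQ/atomic context */
	return kzalloc(sizeof(struct example_event) + payload_len,
		       cds_get_gfp_flags());
}
#endif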
919 
920 /**
921  * cds_get_rx_thread_pending(): get rx thread status
922  * @soc: ol_txrx_soc_handle object
923  *
924  * Return: 1 if the rx thread queue is not empty
925  *	   0 if the rx thread queue is empty
926  */
927 #ifdef WLAN_DP_LEGACY_OL_RX_THREAD
928 int cds_get_rx_thread_pending(ol_txrx_soc_handle soc)
929 {
930 	p_cds_sched_context cds_sched_context = get_cds_sched_ctxt();
931 
932 	if (!cds_sched_context) {
933 		cds_err("cds_sched_context is NULL");
934 		return 0;
935 	}
936 
937 	spin_lock_bh(&cds_sched_context->ol_rx_queue_lock);
938 
939 	if (list_empty(&cds_sched_context->ol_rx_thread_queue)) {
940 		spin_unlock_bh(&cds_sched_context->ol_rx_queue_lock);
941 		return 0;
942 	}
943 
944 	/* On Helium there is no way to get the number of pending frames
945 	 * in the rx thread, hence return 1 if any frames are queued.
946 	 */
947 	spin_unlock_bh(&cds_sched_context->ol_rx_queue_lock);
948 	return 1;
949 }
950 #endif
951