/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: CDS Scheduler Implementation
 */

#include <cds_api.h>
#include <ani_global.h>
#include <sir_types.h>
#include <qdf_types.h>
#include <lim_api.h>
#include <sme_api.h>
#include <wlan_qct_sys.h>
#include "cds_sched.h"
#include <wlan_hdd_power.h>
#include "wma_types.h"
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/cpu.h>
#ifdef RX_PERFORMANCE
#include <linux/sched/types.h>
#endif
#include "wlan_dp_ucfg_api.h"

/*
 * The following commit was introduced in v5.17:
 * cead18552660 ("exit: Rename complete_and_exit to kthread_complete_and_exit")
 * Use the old name on kernels before 5.17.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0))
/**
 * kthread_complete_and_exit - signal a completion and exit the current thread
 * @c: completion to signal before exiting
 * @s: exit code
 */
#define kthread_complete_and_exit(c, s) complete_and_exit(c, s)
#endif

static spinlock_t ssr_protect_lock;

struct shutdown_notifier {
	struct list_head list;
	void (*cb)(void *priv);
	void *priv;
};

struct list_head shutdown_notifier_head;

enum notifier_state {
	NOTIFIER_STATE_NONE,
	NOTIFIER_STATE_NOTIFYING,
} notifier_state;

static p_cds_sched_context gp_cds_sched_context;

#ifdef WLAN_DP_LEGACY_OL_RX_THREAD
static int cds_ol_rx_thread(void *arg);
static uint32_t affine_cpu;
static QDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);

#define CDS_CORE_PER_CLUSTER (4)
/* Maximum 2 clusters supported */
#define CDS_MAX_CPU_CLUSTERS 2

#define CDS_CPU_CLUSTER_TYPE_LITTLE 0
#define CDS_CPU_CLUSTER_TYPE_PERF 1

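/*
 * Thin wrappers around set_cpus_allowed_ptr() so the Rx thread affinity
 * logic below can pin the thread either to one specific CPU or to an
 * arbitrary cpumask.
 */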
static inline
int cds_set_cpus_allowed_ptr_with_cpu(struct task_struct *task,
				      unsigned long cpu)
{
	return set_cpus_allowed_ptr(task, cpumask_of(cpu));
}

static inline
int cds_set_cpus_allowed_ptr_with_mask(struct task_struct *task,
				       qdf_cpu_mask *new_mask)
{
	return set_cpus_allowed_ptr(task, new_mask);
}

void cds_set_rx_thread_cpu_mask(uint8_t cpu_affinity_mask)
{
	p_cds_sched_context sched_context = get_cds_sched_ctxt();

	if (!sched_context) {
		qdf_err("invalid context");
		return;
	}
	sched_context->conf_rx_thread_cpu_mask = cpu_affinity_mask;
}

void cds_set_rx_thread_ul_cpu_mask(uint8_t cpu_affinity_mask)
{
	p_cds_sched_context sched_context = get_cds_sched_ctxt();

	if (!sched_context) {
		qdf_err("invalid context");
		return;
	}
	sched_context->conf_rx_thread_ul_affinity = cpu_affinity_mask;
}
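/*
 * Illustrative example: a configured mask of 0x06 selects CPU1 and CPU2,
 * since cds_sched_find_attach_cpu() and
 * cds_sched_handle_rx_thread_affinity_req() test each online CPU against
 * the mask with (1 << cpu).
 */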

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
/**
 * cds_rx_thread_log_cpu_affinity_change() - Log Rx thread affinity change
 * @core_affine_cnt: Available cores
 * @tput_req: Throughput request
 * @old_mask: Old affinity mask
 * @new_mask: New affinity mask
 *
 * Return: NONE
 */
static void cds_rx_thread_log_cpu_affinity_change(unsigned char core_affine_cnt,
						  int tput_req,
						  struct cpumask *old_mask,
						  struct cpumask *new_mask)
{
	char new_mask_str[10];
	char old_mask_str[10];

	qdf_mem_zero(new_mask_str, sizeof(new_mask_str));
	qdf_mem_zero(old_mask_str, sizeof(old_mask_str));

	cpumap_print_to_pagebuf(false, old_mask_str, old_mask);
	cpumap_print_to_pagebuf(false, new_mask_str, new_mask);

	cds_debug("num online cores %d, high tput req %d, Rx_thread old mask %s new mask %s",
		  core_affine_cnt, tput_req, old_mask_str, new_mask_str);
}
#else
static void cds_rx_thread_log_cpu_affinity_change(unsigned char core_affine_cnt,
						  int tput_req,
						  struct cpumask *old_mask,
						  struct cpumask *new_mask)
{
}
#endif

/**
 * cds_sched_find_attach_cpu - find available cores and attach to required core
 * @pSchedContext: wlan scheduler context
 * @high_throughput: whether high throughput is currently required
 *
 * Find the currently online cores.
 * During high TPUT:
 * 1) If the user configured a core mask via INI, affine to those cores.
 * 2) Otherwise, affine to the perf-cluster cores.
 *
 * During low TPUT, allow all cores and let the scheduler decide.
 *
 * Return: 0 on success
 *         1 on failure
 */
static int cds_sched_find_attach_cpu(p_cds_sched_context pSchedContext,
	bool high_throughput)
{
	unsigned char core_affine_count = 0;
	qdf_cpu_mask new_mask;
	unsigned long cpus;
	struct cds_config_info *cds_cfg;

	cds_debug("num possible cpu %d", num_possible_cpus());

	qdf_cpumask_clear(&new_mask);

	if (high_throughput) {
		/* Get Online perf/pwr CPU count */
		for_each_online_cpu(cpus) {
			if (topology_physical_package_id(cpus) >
							CDS_MAX_CPU_CLUSTERS) {
				cds_err("can handle max %d clusters, returning...",
					CDS_MAX_CPU_CLUSTERS);
				goto err;
			}

			if (pSchedContext->conf_rx_thread_cpu_mask) {
				if (pSchedContext->conf_rx_thread_cpu_mask &
								(1 << cpus))
					qdf_cpumask_set_cpu(cpus, &new_mask);
			} else if (topology_physical_package_id(cpus) ==
						 CDS_CPU_CLUSTER_TYPE_PERF) {
				qdf_cpumask_set_cpu(cpus, &new_mask);
			}

			core_affine_count++;
		}
	} else {
		/* Attach to all cores, let scheduler decide */
		qdf_cpumask_setall(&new_mask);
	}

	cds_rx_thread_log_cpu_affinity_change(core_affine_count,
				(int)pSchedContext->high_throughput_required,
				&pSchedContext->rx_thread_cpu_mask,
				&new_mask);

	if (!cpumask_equal(&pSchedContext->rx_thread_cpu_mask, &new_mask)) {
		cds_cfg = cds_get_ini_config();
		cpumask_copy(&pSchedContext->rx_thread_cpu_mask, &new_mask);
		if (cds_cfg && cds_cfg->enable_dp_rx_threads)
			ucfg_dp_txrx_set_cpu_mask(cds_get_context(QDF_MODULE_ID_SOC),
						  &new_mask);
		else
			cds_set_cpus_allowed_ptr_with_mask(pSchedContext->ol_rx_thread,
							   &new_mask);
	}

	return 0;
err:
	return 1;
}

int cds_sched_handle_cpu_hot_plug(void)
{
	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();

	if (!pSchedContext) {
		cds_err("invalid context");
		return 1;
	}

	if (cds_is_load_or_unload_in_progress())
		return 0;

	mutex_lock(&pSchedContext->affinity_lock);
	if (cds_sched_find_attach_cpu(pSchedContext,
		pSchedContext->high_throughput_required)) {
		cds_err("handle hot plug fail");
		mutex_unlock(&pSchedContext->affinity_lock);
		return 1;
	}
	mutex_unlock(&pSchedContext->affinity_lock);
	return 0;
}

void cds_sched_handle_rx_thread_affinity_req(bool high_throughput)
{
	p_cds_sched_context pschedcontext = get_cds_sched_ctxt();
	unsigned long cpus;
	qdf_cpu_mask new_mask;
	unsigned char core_affine_count = 0;

	if (!pschedcontext || !pschedcontext->ol_rx_thread)
		return;

	if (cds_is_load_or_unload_in_progress()) {
		cds_err("load or unload in progress");
		return;
	}

	if (pschedcontext->rx_affinity_required == high_throughput)
		return;

	pschedcontext->rx_affinity_required = high_throughput;
	qdf_cpumask_clear(&new_mask);
	if (!high_throughput) {
		/* Attach to all cores, let scheduler decide */
		qdf_cpumask_setall(&new_mask);
		goto affine_thread;
	}
	for_each_online_cpu(cpus) {
		if (topology_physical_package_id(cpus) >
		    CDS_MAX_CPU_CLUSTERS) {
			cds_err("can handle max %d clusters",
				CDS_MAX_CPU_CLUSTERS);
			return;
		}
		if (pschedcontext->conf_rx_thread_ul_affinity &&
		    (pschedcontext->conf_rx_thread_ul_affinity &
				 (1 << cpus)))
			qdf_cpumask_set_cpu(cpus, &new_mask);

		core_affine_count++;
	}

affine_thread:
	cds_rx_thread_log_cpu_affinity_change(
		core_affine_count,
		(int)pschedcontext->rx_affinity_required,
		&pschedcontext->rx_thread_cpu_mask,
		&new_mask);

	mutex_lock(&pschedcontext->affinity_lock);
	if (!cpumask_equal(&pschedcontext->rx_thread_cpu_mask, &new_mask)) {
		cpumask_copy(&pschedcontext->rx_thread_cpu_mask, &new_mask);
		cds_set_cpus_allowed_ptr_with_mask(pschedcontext->ol_rx_thread,
						   &new_mask);
	}
	mutex_unlock(&pschedcontext->affinity_lock);
}

int cds_sched_handle_throughput_req(bool high_tput_required)
{
	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();

	if (!pSchedContext) {
		cds_err("invalid context");
		return 1;
	}

	if (cds_is_load_or_unload_in_progress()) {
		cds_err("load or unload in progress");
		return 0;
	}

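	/*
	 * Re-evaluate the Rx thread affinity only when the requested
	 * throughput state actually changes; affinity_lock serializes this
	 * against CPU hotplug handling.
	 */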
	mutex_lock(&pSchedContext->affinity_lock);
	if (pSchedContext->high_throughput_required != high_tput_required) {
		pSchedContext->high_throughput_required = high_tput_required;
		if (cds_sched_find_attach_cpu(pSchedContext,
					      high_tput_required)) {
			mutex_unlock(&pSchedContext->affinity_lock);
			return 1;
		}
	}
	mutex_unlock(&pSchedContext->affinity_lock);
	return 0;
}

/**
 * cds_cpu_hotplug_multi_cluster() - calls the multi-cluster hotplug handler,
 *	when on a multi-cluster platform
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS cds_cpu_hotplug_multi_cluster(void)
{
	int cpus;
	unsigned int multi_cluster = 0;

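	/*
	 * On a single-cluster platform every online CPU reports physical
	 * package id 0, so a non-zero id left in multi_cluster after the
	 * loop indicates a multi-cluster platform.
	 */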
	for_each_online_cpu(cpus) {
		multi_cluster = topology_physical_package_id(cpus);
	}

	if (!multi_cluster)
		return QDF_STATUS_E_NOSUPPORT;

	if (cds_sched_handle_cpu_hot_plug())
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

/**
 * __cds_cpu_hotplug_notify() - CPU hotplug event handler
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void __cds_cpu_hotplug_notify(uint32_t cpu, bool cpu_up)
{
	unsigned long pref_cpu = 0;
	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();
	int i;

	if (!pSchedContext || !pSchedContext->ol_rx_thread)
		return;

	if (cds_is_load_or_unload_in_progress() || cds_is_driver_recovering())
		return;

	cds_debug("'%s' event on CPU %u (of %d); Currently affine to CPU %u",
		  cpu_up ? "Up" : "Down", cpu, num_possible_cpus(), affine_cpu);

	/* try multi-cluster scheduling first */
	if (QDF_IS_STATUS_SUCCESS(cds_cpu_hotplug_multi_cluster()))
		return;

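	/*
	 * Single-cluster fallback: keep the Rx thread off CPU0 by picking
	 * the first other online CPU as the preferred core.
	 */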
	if (cpu_up) {
		if (affine_cpu != 0)
			return;

		for_each_online_cpu(i) {
			if (i == 0)
				continue;
			pref_cpu = i;
			break;
		}
	} else {
		if (cpu != affine_cpu)
			return;

		affine_cpu = 0;
		for_each_online_cpu(i) {
			if (i == 0)
				continue;
			pref_cpu = i;
			break;
		}
	}

	if (pref_cpu == 0)
		return;

	if (pSchedContext->ol_rx_thread &&
	    !cds_set_cpus_allowed_ptr_with_cpu(pSchedContext->ol_rx_thread,
					       pref_cpu))
		affine_cpu = pref_cpu;
}

/**
 * cds_cpu_hotplug_notify() - cpu core up/down notification handler wrapper
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void cds_cpu_hotplug_notify(uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__cds_cpu_hotplug_notify(cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}

static void cds_cpu_online_cb(void *context, uint32_t cpu)
{
	cds_cpu_hotplug_notify(cpu, true);
}

static void cds_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	cds_cpu_hotplug_notify(cpu, false);
}
#endif /* WLAN_DP_LEGACY_OL_RX_THREAD */

QDF_STATUS cds_sched_open(void *p_cds_context,
			  p_cds_sched_context pSchedContext,
			  uint32_t SchedCtxSize)
{
	cds_debug("Opening the CDS Scheduler");
	/* Sanity checks */
	if ((!p_cds_context) || (!pSchedContext)) {
		cds_err("Null params being passed");
		return QDF_STATUS_E_FAILURE;
	}
	if (sizeof(cds_sched_context) != SchedCtxSize) {
		cds_debug("Incorrect CDS Sched Context size passed");
		return QDF_STATUS_E_INVAL;
	}
	qdf_mem_zero(pSchedContext, sizeof(cds_sched_context));
#ifdef WLAN_DP_LEGACY_OL_RX_THREAD
	spin_lock_init(&pSchedContext->ol_rx_thread_lock);
	init_waitqueue_head(&pSchedContext->ol_rx_wait_queue);
	init_completion(&pSchedContext->ol_rx_start_event);
	init_completion(&pSchedContext->ol_suspend_rx_event);
	init_completion(&pSchedContext->ol_resume_rx_event);
	init_completion(&pSchedContext->ol_rx_shutdown);
	pSchedContext->ol_rx_event_flag = 0;
	spin_lock_init(&pSchedContext->ol_rx_queue_lock);
	spin_lock_init(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	INIT_LIST_HEAD(&pSchedContext->ol_rx_thread_queue);
	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	INIT_LIST_HEAD(&pSchedContext->cds_ol_rx_pkt_freeq);
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	if (cds_alloc_ol_rx_pkt_freeq(pSchedContext) != QDF_STATUS_SUCCESS)
		goto pkt_freeqalloc_failure;
	qdf_cpuhp_register(&pSchedContext->cpuhp_event_handle,
			   NULL,
			   cds_cpu_online_cb,
			   cds_cpu_before_offline_cb);
	mutex_init(&pSchedContext->affinity_lock);
	pSchedContext->high_throughput_required = false;
	pSchedContext->rx_affinity_required = false;
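	/*
	 * active_staid tracks the STA whose packet the Rx thread is
	 * currently delivering; cds_drop_rxpkt_by_staid() polls it during
	 * peer cleanup so it does not race with a packet still in flight.
	 */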
	pSchedContext->active_staid = OL_TXRX_INVALID_LOCAL_PEER_ID;
#endif
	gp_cds_sched_context = pSchedContext;

#ifdef WLAN_DP_LEGACY_OL_RX_THREAD
	pSchedContext->ol_rx_thread = kthread_create(cds_ol_rx_thread,
						     pSchedContext,
						     "cds_ol_rx_thread");
	if (IS_ERR(pSchedContext->ol_rx_thread)) {
		cds_alert("Could not Create CDS OL RX Thread");
		goto OL_RX_THREAD_START_FAILURE;
	}
	wake_up_process(pSchedContext->ol_rx_thread);
	cds_debug("CDS OL RX thread Created");
	wait_for_completion_interruptible(&pSchedContext->ol_rx_start_event);
	cds_debug("CDS OL Rx Thread has started");
#endif
	/* We're good now: Let's get the ball rolling!!! */
	cds_debug("CDS Scheduler successfully Opened");
	return QDF_STATUS_SUCCESS;

#ifdef WLAN_DP_LEGACY_OL_RX_THREAD
OL_RX_THREAD_START_FAILURE:
	qdf_cpuhp_unregister(&pSchedContext->cpuhp_event_handle);
	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
pkt_freeqalloc_failure:
#endif
	gp_cds_sched_context = NULL;

	return QDF_STATUS_E_RESOURCES;
} /* cds_sched_open() */

#ifdef WLAN_DP_LEGACY_OL_RX_THREAD
void cds_free_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext)
{
	struct cds_ol_rx_pkt *pkt;

	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	while (!list_empty(&pSchedContext->cds_ol_rx_pkt_freeq)) {
		pkt = list_entry((&pSchedContext->cds_ol_rx_pkt_freeq)->next,
				 typeof(*pkt), list);
		list_del(&pkt->list);
		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
		qdf_mem_free(pkt);
		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	}
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
}

/**
 * cds_alloc_ol_rx_pkt_freeq() - Function to allocate free buffer queue
 * @pSchedContext: pointer to the global CDS Sched Context
 *
 * This API allocates CDS_MAX_OL_RX_PKT number of cds message buffers
 * which are used for Rx data processing.
 *
 * Return: status of memory allocation
 */
static QDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext)
{
	struct cds_ol_rx_pkt *pkt, *tmp;
	int i;

	for (i = 0; i < CDS_MAX_OL_RX_PKT; i++) {
		pkt = qdf_mem_malloc(sizeof(*pkt));
		if (!pkt) {
			cds_err("Vos packet allocation for ol rx thread failed");
			goto free;
		}
		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
		list_add_tail(&pkt->list, &pSchedContext->cds_ol_rx_pkt_freeq);
		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	}

	return QDF_STATUS_SUCCESS;

free:
	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	list_for_each_entry_safe(pkt, tmp, &pSchedContext->cds_ol_rx_pkt_freeq,
				 list) {
		list_del(&pkt->list);
		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
		qdf_mem_free(pkt);
		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	}
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	return QDF_STATUS_E_NOMEM;
}

void
cds_free_ol_rx_pkt(p_cds_sched_context pSchedContext,
		   struct cds_ol_rx_pkt *pkt)
{
	memset(pkt, 0, sizeof(*pkt));
	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	list_add_tail(&pkt->list, &pSchedContext->cds_ol_rx_pkt_freeq);
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
}

struct cds_ol_rx_pkt *cds_alloc_ol_rx_pkt(p_cds_sched_context pSchedContext)
{
	struct cds_ol_rx_pkt *pkt;

	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	if (list_empty(&pSchedContext->cds_ol_rx_pkt_freeq)) {
		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
		return NULL;
	}
	pkt = list_first_entry(&pSchedContext->cds_ol_rx_pkt_freeq,
			       struct cds_ol_rx_pkt, list);
	list_del(&pkt->list);
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	return pkt;
}

void
cds_indicate_rxpkt(p_cds_sched_context pSchedContext,
		   struct cds_ol_rx_pkt *pkt)
{
	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
	list_add_tail(&pkt->list, &pSchedContext->ol_rx_thread_queue);
	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
	set_bit(RX_POST_EVENT, &pSchedContext->ol_rx_event_flag);
	wake_up_interruptible(&pSchedContext->ol_rx_wait_queue);
}
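
/*
 * Typical producer flow for handing a frame to the Rx thread (illustrative
 * sketch only, not an exact copy of any caller in this driver; callback,
 * context and nbuf names below are hypothetical):
 *
 *	struct cds_ol_rx_pkt *pkt;
 *
 *	pkt = cds_alloc_ol_rx_pkt(sched_ctx);
 *	if (!pkt)
 *		return;			// free queue exhausted
 *	pkt->callback = my_rx_callback;
 *	pkt->context = my_context;
 *	pkt->Rxpkt = nbuf_list;		// chained qdf_nbuf_t to deliver
 *	pkt->staId = sta_id;
 *	cds_indicate_rxpkt(sched_ctx, pkt);
 *
 * cds_ol_rx_thread() later dequeues the packet, invokes the callback and
 * returns the wrapper to the free queue with cds_free_ol_rx_pkt().
 */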

QDF_STATUS cds_close_rx_thread(void)
{
	cds_debug("invoked");

	if (!gp_cds_sched_context) {
		cds_err("!gp_cds_sched_context");
		return QDF_STATUS_E_FAILURE;
	}

	if (!gp_cds_sched_context->ol_rx_thread)
		return QDF_STATUS_SUCCESS;

	/* Shut down Tlshim Rx thread */
	set_bit(RX_SHUTDOWN_EVENT, &gp_cds_sched_context->ol_rx_event_flag);
	set_bit(RX_POST_EVENT, &gp_cds_sched_context->ol_rx_event_flag);
	wake_up_interruptible(&gp_cds_sched_context->ol_rx_wait_queue);
	wait_for_completion(&gp_cds_sched_context->ol_rx_shutdown);
	gp_cds_sched_context->ol_rx_thread = NULL;
	cds_drop_rxpkt_by_staid(gp_cds_sched_context, WLAN_MAX_STA_COUNT);
	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
	qdf_cpuhp_unregister(&gp_cds_sched_context->cpuhp_event_handle);

	return QDF_STATUS_SUCCESS;
} /* cds_close_rx_thread */

void cds_drop_rxpkt_by_staid(p_cds_sched_context pSchedContext, uint16_t staId)
{
	struct list_head local_list;
	struct cds_ol_rx_pkt *pkt, *tmp;
	qdf_nbuf_t buf, next_buf;
	uint32_t timeout = 0;

	INIT_LIST_HEAD(&local_list);
	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
	if (list_empty(&pSchedContext->ol_rx_thread_queue)) {
		spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
		return;
	}
	list_for_each_entry_safe(pkt, tmp, &pSchedContext->ol_rx_thread_queue,
				 list) {
		if (pkt->staId == staId || staId == WLAN_MAX_STA_COUNT)
			list_move_tail(&pkt->list, &local_list);
	}
	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);

	list_for_each_entry_safe(pkt, tmp, &local_list, list) {
		list_del(&pkt->list);
		buf = pkt->Rxpkt;
		while (buf) {
			next_buf = qdf_nbuf_queue_next(buf);
			qdf_nbuf_free(buf);
			buf = next_buf;
		}
		cds_free_ol_rx_pkt(pSchedContext, pkt);
	}

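	/*
	 * Wait for the Rx thread to finish delivering any packet it is
	 * currently processing for this STA (tracked via active_staid in
	 * cds_rx_from_queue()), bounded by CDS_ACTIVE_STAID_CLEANUP_TIMEOUT.
	 */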
	while (pSchedContext->active_staid == staId &&
	       timeout <= CDS_ACTIVE_STAID_CLEANUP_TIMEOUT) {
		if (qdf_in_interrupt())
			qdf_mdelay(CDS_ACTIVE_STAID_CLEANUP_DELAY);
		else
			qdf_sleep(CDS_ACTIVE_STAID_CLEANUP_DELAY);
		timeout += CDS_ACTIVE_STAID_CLEANUP_DELAY;
	}

	if (pSchedContext->active_staid == staId)
		cds_err("Failed to cleanup RX packets for staId:%u", staId);
}

/**
 * cds_rx_from_queue() - function to process pending Rx packets
 * @pSchedContext: Pointer to the global CDS Sched Context
 *
 * This API traverses the pending buffer list and calls the registered
 * callback for each packet, which essentially delivers the packet to HDD.
 *
 * Return: none
 */
static void cds_rx_from_queue(p_cds_sched_context pSchedContext)
{
	struct cds_ol_rx_pkt *pkt;
	uint16_t sta_id;

	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
	while (!list_empty(&pSchedContext->ol_rx_thread_queue)) {
		pkt = list_first_entry(&pSchedContext->ol_rx_thread_queue,
				       struct cds_ol_rx_pkt, list);
		list_del(&pkt->list);
		pSchedContext->active_staid = pkt->staId;
		spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
		sta_id = pkt->staId;
		pkt->callback(pkt->context, pkt->Rxpkt, sta_id);
		cds_free_ol_rx_pkt(pSchedContext, pkt);
		spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
		pSchedContext->active_staid = OL_TXRX_INVALID_LOCAL_PEER_ID;
	}
	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
}

/**
 * cds_ol_rx_thread() - cds main tlshim rx thread
 * @arg: pointer to the global CDS Sched Context
 *
 * This API is the thread handler for TL shim data packet processing.
 *
 * Return: thread exit code
 */
static int cds_ol_rx_thread(void *arg)
{
	p_cds_sched_context pSchedContext = (p_cds_sched_context)arg;
	bool shutdown = false;
	int status;

#ifdef RX_THREAD_PRIORITY
	struct sched_param scheduler_params = {0};

	scheduler_params.sched_priority = 1;
	sched_setscheduler(current, SCHED_FIFO, &scheduler_params);
#else
	set_user_nice(current, -1);
#endif

	qdf_set_wake_up_idle(true);

	complete(&pSchedContext->ol_rx_start_event);

	while (!shutdown) {
		status =
			wait_event_interruptible(pSchedContext->ol_rx_wait_queue,
						 test_bit(RX_POST_EVENT,
							  &pSchedContext->ol_rx_event_flag)
						 || test_bit(RX_SUSPEND_EVENT,
							     &pSchedContext->ol_rx_event_flag));
		if (status == -ERESTARTSYS)
			break;

		clear_bit(RX_POST_EVENT, &pSchedContext->ol_rx_event_flag);
		while (true) {
			if (test_bit(RX_SHUTDOWN_EVENT,
				     &pSchedContext->ol_rx_event_flag)) {
				clear_bit(RX_SHUTDOWN_EVENT,
					  &pSchedContext->ol_rx_event_flag);
				if (test_bit(RX_SUSPEND_EVENT,
					     &pSchedContext->ol_rx_event_flag)) {
					clear_bit(RX_SUSPEND_EVENT,
						  &pSchedContext->ol_rx_event_flag);
					complete
						(&pSchedContext->ol_suspend_rx_event);
				}
				cds_debug("Shutting down OL RX Thread");
				shutdown = true;
				break;
			}
			cds_rx_from_queue(pSchedContext);

			if (test_bit(RX_SUSPEND_EVENT,
				     &pSchedContext->ol_rx_event_flag)) {
				clear_bit(RX_SUSPEND_EVENT,
					  &pSchedContext->ol_rx_event_flag);
				spin_lock(&pSchedContext->ol_rx_thread_lock);
				INIT_COMPLETION
					(pSchedContext->ol_resume_rx_event);
				complete(&pSchedContext->ol_suspend_rx_event);
				spin_unlock(&pSchedContext->ol_rx_thread_lock);
				wait_for_completion_interruptible
					(&pSchedContext->ol_resume_rx_event);
			}
			break;
		}
	}

	cds_debug("Exiting CDS OL rx thread");
	kthread_complete_and_exit(&pSchedContext->ol_rx_shutdown, 0);

	return 0;
}

void cds_resume_rx_thread(void)
{
	p_cds_sched_context cds_sched_context;

	cds_sched_context = get_cds_sched_ctxt();
	if (!cds_sched_context) {
		cds_err("cds_sched_context is NULL");
		return;
	}

	complete(&cds_sched_context->ol_resume_rx_event);
}
#endif

QDF_STATUS cds_sched_close(void)
{
	cds_debug("invoked");

	if (!gp_cds_sched_context) {
		cds_err("!gp_cds_sched_context");
		return QDF_STATUS_E_FAILURE;
	}

	cds_close_rx_thread();

	gp_cds_sched_context = NULL;
	return QDF_STATUS_SUCCESS;
} /* cds_sched_close() */

p_cds_sched_context get_cds_sched_ctxt(void)
{
	/* Make sure the CDS Scheduler context has been initialized */
	if (!gp_cds_sched_context)
		cds_err("!gp_cds_sched_context");

	return gp_cds_sched_context;
}

void cds_ssr_protect_init(void)
{
	spin_lock_init(&ssr_protect_lock);
	INIT_LIST_HEAD(&shutdown_notifier_head);
}

QDF_STATUS cds_shutdown_notifier_register(void (*cb)(void *priv), void *priv)
{
	struct shutdown_notifier *notifier;
	unsigned long irq_flags;

	notifier = qdf_mem_malloc(sizeof(*notifier));
	if (!notifier)
		return QDF_STATUS_E_NOMEM;

	/*
	 * This logic could be simplified if separate states were maintained
	 * for shutdown and reinit. Right now there is only a
	 * recovery-in-progress state, and checking against it does not help
	 * because some modules may need to register their callbacks during
	 * reinit. For now this check prevents a notifier registration from
	 * racing with cds_shutdown_notifier_call() while it is invoking the
	 * callbacks.
	 */
	spin_lock_irqsave(&ssr_protect_lock, irq_flags);
	if (notifier_state == NOTIFIER_STATE_NOTIFYING) {
		spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
		qdf_mem_free(notifier);
		return QDF_STATUS_E_INVAL;
	}

	notifier->cb = cb;
	notifier->priv = priv;

	list_add_tail(&notifier->list, &shutdown_notifier_head);
	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);

	return QDF_STATUS_SUCCESS;
}

void cds_shutdown_notifier_purge(void)
{
	struct shutdown_notifier *notifier, *temp;
	unsigned long irq_flags;

	spin_lock_irqsave(&ssr_protect_lock, irq_flags);
	list_for_each_entry_safe(notifier, temp,
				 &shutdown_notifier_head, list) {
		list_del(&notifier->list);
		spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);

		qdf_mem_free(notifier);

		spin_lock_irqsave(&ssr_protect_lock, irq_flags);
	}

	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
}

void cds_shutdown_notifier_call(void)
{
	struct shutdown_notifier *notifier;
	unsigned long irq_flags;

	spin_lock_irqsave(&ssr_protect_lock, irq_flags);
	notifier_state = NOTIFIER_STATE_NOTIFYING;

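	/*
	 * The lock is dropped around each callback invocation; new
	 * registrations are rejected while notifier_state is
	 * NOTIFIER_STATE_NOTIFYING (see cds_shutdown_notifier_register()).
	 */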
	list_for_each_entry(notifier, &shutdown_notifier_head, list) {
		spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);

		notifier->cb(notifier->priv);

		spin_lock_irqsave(&ssr_protect_lock, irq_flags);
	}

	notifier_state = NOTIFIER_STATE_NONE;
	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
}

int cds_get_gfp_flags(void)
{
	int flags = GFP_KERNEL;

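	/*
	 * Callers in interrupt/atomic context (or with IRQs disabled) must
	 * not sleep, so fall back to GFP_ATOMIC for them.
	 */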
	if (in_interrupt() || in_atomic() || irqs_disabled())
		flags = GFP_ATOMIC;

	return flags;
}

/**
 * cds_get_rx_thread_pending() - get Rx thread queue status
 * @soc: ol_txrx_soc_handle object
 *
 * Return: 1 if the Rx thread queue is not empty
 *	   0 if the Rx thread queue is empty
 */
#ifdef WLAN_DP_LEGACY_OL_RX_THREAD
int cds_get_rx_thread_pending(ol_txrx_soc_handle soc)
{
	p_cds_sched_context cds_sched_context = get_cds_sched_ctxt();

	if (!cds_sched_context) {
		cds_err("cds_sched_context is NULL");
		return 0;
	}

	spin_lock_bh(&cds_sched_context->ol_rx_queue_lock);

	if (list_empty(&cds_sched_context->ol_rx_thread_queue)) {
		spin_unlock_bh(&cds_sched_context->ol_rx_queue_lock);
		return 0;
	}

	/* On Helium there is no way to get the number of pending frames
	 * in the Rx thread, hence return 1 if any frames are queued.
	 */
	spin_unlock_bh(&cds_sched_context->ol_rx_queue_lock);
	return 1;
}
#endif