/*
 * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __CDS_SCHED_H
#define __CDS_SCHED_H

/**
 * DOC: cds_sched.h
 * Connectivity driver services scheduler
 */

#include <qdf_event.h>
#include <i_qdf_types.h>
#include <linux/wait.h>
#if defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
#endif
#include <qdf_types.h>
#include "qdf_lock.h"
#include "qdf_mc_timer.h"
#include "cds_config.h"
#include "qdf_cpuhp.h"
#include "cdp_txrx_cmn_struct.h"

#define MC_SUSPEND_EVENT 0x002
#define RX_POST_EVENT 0x001
#define RX_SUSPEND_EVENT 0x002
#define RX_VDEV_DEL_EVENT 0x004
#define RX_SHUTDOWN_EVENT 0x010

#define RX_REFILL_POST_EVENT 0x001
#define RX_REFILL_SUSPEND_EVENT 0x002
#define RX_REFILL_SHUTDOWN_EVENT 0x004

#ifdef WLAN_DP_LEGACY_OL_RX_THREAD
/*
** Maximum number of cds messages to be allocated for
** the OL Rx thread.
*/
#define CDS_MAX_OL_RX_PKT 4000

#define CDS_ACTIVE_STAID_CLEANUP_DELAY 10
#define CDS_ACTIVE_STAID_CLEANUP_TIMEOUT 200
#endif

typedef void (*cds_ol_rx_thread_cb)(void *context,
				    qdf_nbuf_t rxpkt,
				    uint16_t staid);

/*
** CDS message wrapper for data rx from TXRX
*/
struct cds_ol_rx_pkt {
	struct list_head list;
	void *context;

	/* Rx skb */
	qdf_nbuf_t Rxpkt;

	/* Station id to which this packet is destined */
	uint16_t staId;

	/* Callback to further send this packet to the txrx layer */
	cds_ol_rx_thread_cb callback;
};
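
/*
 * Illustrative sketch (not part of the driver API contract): a producer in
 * the Rx path typically obtains a wrapper from the free queue, fills it in
 * and hands it to the Rx thread. The surrounding variable names below are
 * assumptions for illustration only.
 *
 *	struct cds_ol_rx_pkt *pkt;
 *
 *	pkt = cds_alloc_ol_rx_pkt(sched_ctx);
 *	if (pkt) {
 *		pkt->callback = my_rx_cb;	// hypothetical callback
 *		pkt->context = my_ctx;		// hypothetical context
 *		pkt->Rxpkt = nbuf;
 *		pkt->staId = sta_id;
 *		cds_indicate_rxpkt(sched_ctx, pkt);
 *	}
 */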

/*
** CDS Scheduler context
** The scheduler context contains the following:
** ** the message queues
** ** the handle to the thread
** ** pointer to the events that gracefully shut down the MC and Tx threads
*/
typedef struct _cds_sched_context {
#ifdef WLAN_DP_LEGACY_OL_RX_THREAD
	spinlock_t ol_rx_thread_lock;

	/* OL Rx thread handle */
	struct task_struct *ol_rx_thread;

	/* Handle of Event for Rx thread to signal startup */
	struct completion ol_rx_start_event;

	/* Completion object to suspend OL rx thread */
	struct completion ol_suspend_rx_event;

	/* Completion object to resume OL rx thread */
	struct completion ol_resume_rx_event;

	/* Completion object for OL Rx thread shutdown */
	struct completion ol_rx_shutdown;

	/* Waitq for OL Rx thread */
	wait_queue_head_t ol_rx_wait_queue;

	unsigned long ol_rx_event_flag;

	/* Rx buffer queue */
	struct list_head ol_rx_thread_queue;

	/* Spinlock to synchronize between tasklet and thread */
	spinlock_t ol_rx_queue_lock;

	/* Lock to synchronize free buffer queue access */
	spinlock_t cds_ol_rx_pkt_freeq_lock;

	/* Free message queue for OL Rx processing */
	struct list_head cds_ol_rx_pkt_freeq;

	/* The CPU hotplug event registration handle, used to unregister */
	struct qdf_cpuhp_handler *cpuhp_event_handle;

	/* affinity lock */
	struct mutex affinity_lock;

	/* Saved rx thread CPU affinity */
	struct cpumask rx_thread_cpu_mask;

	/* Configured Rx thread CPU affinity bitmask */
	uint8_t conf_rx_thread_cpu_mask;

	/* high throughput required */
	bool high_throughput_required;

	/* affinity required during uplink traffic */
	bool rx_affinity_required;
	uint8_t conf_rx_thread_ul_affinity;

	/* sta id of packets under processing in thread context */
	uint16_t active_staid;
#endif
} cds_sched_context, *p_cds_sched_context;
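
/*
 * Illustrative sketch of how the fields above fit together (an assumption
 * based on this header, not a definitive description of cds_sched.c): a
 * producer running in tasklet context queues a packet on ol_rx_thread_queue
 * under ol_rx_queue_lock, flags RX_POST_EVENT in ol_rx_event_flag and wakes
 * ol_rx_wait_queue; the Rx thread sleeps on the wait queue until one of the
 * RX_*_EVENT bits is set and then drains the queue.
 *
 *	spin_lock_bh(&sched_ctx->ol_rx_queue_lock);
 *	list_add_tail(&pkt->list, &sched_ctx->ol_rx_thread_queue);
 *	spin_unlock_bh(&sched_ctx->ol_rx_queue_lock);
 *	// a real implementation would update ol_rx_event_flag atomically
 *	sched_ctx->ol_rx_event_flag |= RX_POST_EVENT;
 *	wake_up_interruptible(&sched_ctx->ol_rx_wait_queue);
 */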

/**
 * struct cds_log_complete - Log completion internal structure
 * @is_fatal: Type is fatal or not
 * @indicator: Source of bug report
 * @reason_code: Reason code for bug report
 * @is_report_in_progress: If bug report is in progress
 * @recovery_needed: if recovery is needed after report completion
 *
 * This structure internally stores the log related params
 */
struct cds_log_complete {
	uint32_t is_fatal;
	uint32_t indicator;
	uint32_t reason_code;
	bool is_report_in_progress;
	bool recovery_needed;
};

struct cds_context {
	/* Scheduler Context */
	cds_sched_context qdf_sched;

	/* HDD Module Context */
	void *hdd_context;

	/* MAC Module Context */
	void *mac_context;

	uint32_t driver_state;

	/* WMA Context */
	void *wma_context;

	void *hif_context;

	void *htc_ctx;

	void *g_ol_context;

	/*
	 * qdf_ctx is used by QDF to access device information
	 * while allocating DMA memory.
	 */
	qdf_device_t qdf_ctx;

	void *dp_soc;

	/* Configuration handle used to get system configuration */
	struct cdp_cfg *cfg_ctx;

	/* radio index per driver */
	int radio_index;

	bool is_wakelock_log_enabled;
	uint32_t wakelock_log_level;
	uint32_t connectivity_log_level;
	uint32_t packet_stats_log_level;
	uint32_t driver_debug_log_level;
	uint32_t fw_debug_log_level;
	struct cds_log_complete log_complete;
	qdf_spinlock_t bug_report_lock;

	bool enable_fatal_event;
	struct cds_config_info *cds_cfg;

	struct ol_tx_sched_wrr_ac_specs_t ac_specs[QCA_WLAN_AC_ALL];
	qdf_work_t cds_recovery_work;
	qdf_workqueue_t *cds_recovery_wq;
	enum qdf_hang_reason recovery_reason;

	/* To protect bit(CDS_DRIVER_STATE_SYS_REBOOTING) of driver_state */
	qdf_mutex_t sys_reboot_lock;
};

/*---------------------------------------------------------------------------
  Function declarations and documentation
  ---------------------------------------------------------------------------*/
#ifdef WLAN_DP_LEGACY_OL_RX_THREAD

/**
 * cds_sched_handle_cpu_hot_plug() - cpu hotplug event handler
 *
 * CPU hotplug indication handler: finds the online cores and assigns the
 * proper core based on the performance requirement.
 *
 * Return: 0 on success
 *         1 on failure
 */
int cds_sched_handle_cpu_hot_plug(void);

/**
 * cds_sched_handle_throughput_req() - cpu throughput requirement handler
 * @high_tput_required: whether high throughput is required
 *
 * High or low throughput indication handler: finds the online cores and
 * assigns the proper core based on the performance requirement.
 *
 * Return: 0 on success
 *         1 on failure
 */
int cds_sched_handle_throughput_req(bool high_tput_required);
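
/*
 * Example (hedged sketch): a throughput monitor might call this on a
 * transition to or from a high-throughput state so the Rx processing can be
 * moved to a more capable core. The threshold name below is an assumption
 * for illustration only.
 *
 *	if (tput_level >= TPUT_LEVEL_HIGH)	// hypothetical threshold
 *		cds_sched_handle_throughput_req(true);
 *	else
 *		cds_sched_handle_throughput_req(false);
 */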

/**
 * cds_sched_handle_rx_thread_affinity_req() - rx thread affinity req handler
 * @high_throughput: whether high throughput is required
 *
 * Rx thread affinity handler: finds the online cores and assigns the proper
 * core based on the performance requirement.
 *
 * Return: None
 */
void cds_sched_handle_rx_thread_affinity_req(bool high_throughput);

/**
 * cds_set_rx_thread_ul_cpu_mask() - Rx_thread affinity for UL from INI
 * @cpu_affinity_mask: CPU affinity bitmap
 *
 * Return: None
 */
void cds_set_rx_thread_ul_cpu_mask(uint8_t cpu_affinity_mask);

/**
 * cds_set_rx_thread_cpu_mask() - Rx_thread affinity from INI
 * @cpu_affinity_mask: CPU affinity bitmap
 *
 * Return: None
 */
void cds_set_rx_thread_cpu_mask(uint8_t cpu_affinity_mask);
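
/*
 * Note (assumption, for illustration): treating the mask as a CPU bitmap
 * with bit N mapping to CPU N, a configured INI value of 0x06 would request
 * CPU1 and CPU2 for the Rx thread:
 *
 *	cds_set_rx_thread_cpu_mask(0x06);
 *	cds_set_rx_thread_ul_cpu_mask(0x02);	// uplink-specific affinity
 */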

/**
 * cds_drop_rxpkt_by_staid() - api to drop pending rx packets for a sta
 * @pSchedContext: Pointer to the global CDS Sched Context
 * @staId: Station Id
 *
 * This API drops the queued packets for a station; to drop all the pending
 * packets, the caller has to pass WLAN_MAX_STA_COUNT as staId.
 *
 * Return: none
 */
void cds_drop_rxpkt_by_staid(p_cds_sched_context pSchedContext, uint16_t staId);
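
/*
 * Example (sketch): dropping the packets of a single departing station vs.
 * flushing everything; WLAN_MAX_STA_COUNT is defined elsewhere in the driver.
 *
 *	cds_drop_rxpkt_by_staid(sched_ctx, sta_id);		// one station
 *	cds_drop_rxpkt_by_staid(sched_ctx, WLAN_MAX_STA_COUNT);	// all stations
 */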

/**
 * cds_indicate_rxpkt() - indicate rx data packet
 * @pSchedContext: Pointer to the global CDS Sched Context
 * @pkt: CDS data message buffer
 *
 * This API enqueues the rx packet into ol_rx_thread_queue and notifies
 * cds_ol_rx_thread().
 *
 * Return: none
 */
void cds_indicate_rxpkt(p_cds_sched_context pSchedContext,
			struct cds_ol_rx_pkt *pkt);

/**
 * cds_close_rx_thread() - close the Rx thread
 *
 * This API closes the Rx thread.
 *
 * Return: qdf status
 */
QDF_STATUS cds_close_rx_thread(void);

/**
 * cds_alloc_ol_rx_pkt() - API to return next available cds message
 * @pSchedContext: Pointer to the global CDS Sched Context
 *
 * This API returns the next available cds message buffer used for rx data
 * processing.
 *
 * Return: Pointer to cds message buffer
 */
struct cds_ol_rx_pkt *cds_alloc_ol_rx_pkt(p_cds_sched_context pSchedContext);

/**
 * cds_free_ol_rx_pkt() - api to release cds message to the freeq
 * @pSchedContext: Pointer to the global CDS Sched Context
 * @pkt: CDS message buffer to be returned to free queue.
 *
 * This API returns the cds message used for Rx data to the free queue.
 *
 * Return: none
 */
void cds_free_ol_rx_pkt(p_cds_sched_context pSchedContext,
			struct cds_ol_rx_pkt *pkt);
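
/*
 * Example (sketch) of the wrapper lifecycle including the failure path; the
 * fill helper and error condition shown are hypothetical:
 *
 *	struct cds_ol_rx_pkt *pkt = cds_alloc_ol_rx_pkt(sched_ctx);
 *
 *	if (!pkt)
 *		return QDF_STATUS_E_NOMEM;	// free queue exhausted
 *	if (fill_pkt(pkt, nbuf) != QDF_STATUS_SUCCESS) {	// hypothetical helper
 *		cds_free_ol_rx_pkt(sched_ctx, pkt);	// return wrapper to freeq
 *		return QDF_STATUS_E_FAILURE;
 *	}
 *	cds_indicate_rxpkt(sched_ctx, pkt);
 */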

/**
 * cds_free_ol_rx_pkt_freeq() - free cds buffer free queue
 * @pSchedContext: pointer to the global CDS Sched Context
 *
 * This API frees the buffers available in the free cds buffer
 * queue which is used for Data rx processing.
 *
 * Return: none
 */
void cds_free_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);

/**
 * cds_get_rx_thread_pending() - get rx thread status
 * @soc: ol_txrx_soc_handle object
 *
 * Return: 1 if the rx thread queue is not empty,
 *         0 if the rx thread queue is empty.
 */
int cds_get_rx_thread_pending(ol_txrx_soc_handle soc);
#else
static inline void cds_sched_handle_rx_thread_affinity_req(
	bool high_throughput) {}

static inline void cds_set_rx_thread_ul_cpu_mask(uint8_t cpu_affinity_mask) {}

static inline void cds_set_rx_thread_cpu_mask(uint8_t cpu_affinity_mask) {}

static inline
void cds_drop_rxpkt_by_staid(p_cds_sched_context pSchedContext, uint16_t staId)
{
}

static inline
void cds_indicate_rxpkt(p_cds_sched_context pSchedContext,
			struct cds_ol_rx_pkt *pkt)
{
}

static inline
QDF_STATUS cds_close_rx_thread(void)
{
	return QDF_STATUS_SUCCESS;
}

static inline
struct cds_ol_rx_pkt *cds_alloc_ol_rx_pkt(p_cds_sched_context pSchedContext)
{
	return NULL;
}

static inline
void cds_free_ol_rx_pkt(p_cds_sched_context pSchedContext,
			struct cds_ol_rx_pkt *pkt)
{
}

static inline
void cds_free_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext)
{
}

static inline int cds_sched_handle_throughput_req(
	bool high_tput_required)
{
	return 0;
}

static inline int cds_get_rx_thread_pending(ol_txrx_soc_handle soc)
{
	return 0;
}
#endif

/**
 * cds_sched_open() - initialize the CDS Scheduler
 * @p_cds_context: Pointer to the global CDS Context
 * @pSchedContext: Pointer to a previously allocated buffer big
 *	enough to hold a scheduler context.
 * @SchedCtxSize: CDS scheduler context size
 *
 * This function initializes the CDS Scheduler. Upon successful
 * initialization:
 * - All the message queues are initialized
 * - The Main Controller thread is created and ready to receive and
 *   dispatch messages.
 *
 * Return: QDF status
 */
QDF_STATUS cds_sched_open(void *p_cds_context,
			  p_cds_sched_context pSchedContext,
			  uint32_t SchedCtxSize);
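
/*
 * Example (sketch): the expected open/close pairing from a driver init path;
 * error handling is abbreviated and the surrounding flow is an assumption.
 *
 *	p_cds_sched_context sched_ctx = get_cds_sched_ctxt();
 *	QDF_STATUS status;
 *
 *	status = cds_sched_open(cds_ctx, sched_ctx, sizeof(cds_sched_context));
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *	...
 *	cds_sched_close();
 */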

/**
 * cds_sched_close() - close the cds scheduler
 *
 * This API closes the CDS Scheduler. Upon successful closing:
 * - All the message queues are flushed
 * - The Main Controller thread is closed
 * - The Tx thread is closed
 *
 * Return: qdf status
 */
QDF_STATUS cds_sched_close(void);

/**
 * get_cds_sched_ctxt() - get cds scheduler context
 *
 * Return: cds scheduler context
 */
p_cds_sched_context get_cds_sched_ctxt(void);

void qdf_timer_module_init(void);
void qdf_timer_module_deinit(void);

/**
 * cds_ssr_protect_init() - initialize ssr protection debug functionality
 *
 * Return: void
 */
void cds_ssr_protect_init(void);

/**
 * cds_get_gfp_flags() - get GFP flags
 *
 * Based on the calling context, return the appropriate GFP flags.
 *
 * Return: gfp flags
 */
int cds_get_gfp_flags(void);
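
/*
 * Example (sketch): using the returned flags for an allocation that may run
 * in atomic context; the cast reflects the int return type of this helper.
 *
 *	void *buf = kmalloc(len, (gfp_t)cds_get_gfp_flags());
 */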

/**
 * cds_shutdown_notifier_register() - Register for shutdown notification
 * @cb: Callback to be called
 * @priv: Private pointer to be passed back to the callback
 *
 * During driver remove or shutdown (recovery), external threads might be
 * stuck waiting on some event from the firmware at lower layers. Remove or
 * shutdown can't proceed until the thread completes, to avoid any race
 * condition. Callbacks can be registered here to get an early notification
 * of remove or shutdown so that the waiting thread can be unblocked and
 * remove or shutdown can proceed further, since waiting there may not make
 * sense when the FW may already have gone down.
 *
 * Return: QDF status
 */
QDF_STATUS cds_shutdown_notifier_register(void (*cb)(void *priv), void *priv);
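
/*
 * Example (sketch): a waiter can register a notifier that completes the
 * event it is blocked on, so shutdown does not stall behind it. The callback
 * and event names are hypothetical.
 *
 *	static void my_shutdown_cb(void *priv)
 *	{
 *		qdf_event_set((qdf_event_t *)priv);	// unblock the waiter
 *	}
 *
 *	cds_shutdown_notifier_register(my_shutdown_cb, &my_wait_event);
 */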

/**
 * cds_shutdown_notifier_purge() - Purge all the notifiers
 *
 * Shutdown notifiers are added to provide an early notification of remove or
 * shutdown being initiated. This API purges all the registered callbacks, as
 * they are no longer useful once all the lower layers are being shut down.
 *
 * Return: None
 */
void cds_shutdown_notifier_purge(void);

/**
 * cds_shutdown_notifier_call() - Call shutdown notifier call back
 *
 * Call the registered shutdown notifier callbacks to indicate remove or
 * shutdown.
 *
 * Return: None
 */
void cds_shutdown_notifier_call(void);

/**
 * cds_resume_rx_thread() - resume rx thread by completing its resume event
 *
 * Resume the RX thread by completing the RX thread resume event.
 *
 * Return: None
 */
void cds_resume_rx_thread(void);

#endif /* #ifndef __CDS_SCHED_H */