/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#if !defined(__WLAN_DP_RX_THREAD_H)
#define __WLAN_DP_RX_THREAD_H

#include <qdf_lock.h>
#include <qdf_event.h>
#include <qdf_threads.h>
#include <wlan_objmgr_vdev_obj.h>
#include "cfg_dp.h"
#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_cmn.h>
#include "wlan_cfg.h"
#include "qdf_nbuf.h"
#include "qdf_net_if.h"

/* Maximum number of REO rings supported (for stats tracking) */
#define DP_RX_TM_MAX_REO_RINGS WLAN_CFG_NUM_REO_DEST_RING
/* Number of DP RX threads supported */
#define DP_MAX_RX_THREADS WLAN_CFG_NUM_REO_DEST_RING

/*
 * struct dp_rx_tm_handle_cmn - opaque handle through which the rx_threads
 * store the rx_tm_handle. This handle is common to all the threads.
 * Individual threads should not access dp_rx_tm_handle elements directly;
 * access must go through the APIs below.
 */
struct dp_rx_tm_handle_cmn;

/**
 * struct dp_rx_thread_stats - structure holding stats for DP RX thread
 * @nbuf_queued: packets queued into the thread, per reo ring
 * @nbuf_queued_total: packets queued into the thread for all reo rings
 * @nbuf_dequeued: packets de-queued from the thread
 * @nbuf_sent_to_stack: packets sent to the stack. Some dequeued packets may
 *			be dropped due to no peer or vdev, hence this stat.
 * @gro_flushes: number of GRO flushes
 * @gro_flushes_by_vdev_del: number of GRO flushes triggered by vdev delete
 * @nbufq_max_len: maximum number of nbuf_lists queued for the thread
 * @dropped_invalid_vdev: packets (nbuf_list) dropped due to no vdev
 * @rx_flushed: packets flushed after vdev delete
 * @dropped_invalid_peer: packets (nbuf_list) dropped due to no peer
 * @dropped_invalid_os_rx_handles: packets (nbuf_list) dropped due to no os rx
 *				   handles
 * @dropped_others: packets dropped due to other reasons
 * @dropped_enq_fail: packets dropped because the pending queue was full
 * @rx_nbufq_loop_yield: rx loop yield counter
 */
struct dp_rx_thread_stats {
	unsigned int nbuf_queued[DP_RX_TM_MAX_REO_RINGS];
	unsigned int nbuf_queued_total;
	unsigned int nbuf_dequeued;
	unsigned int nbuf_sent_to_stack;
	unsigned int gro_flushes;
	unsigned int gro_flushes_by_vdev_del;
	unsigned int nbufq_max_len;
	unsigned int dropped_invalid_vdev;
	unsigned int rx_flushed;
	unsigned int dropped_invalid_peer;
	unsigned int dropped_invalid_os_rx_handles;
	unsigned int dropped_others;
	unsigned int dropped_enq_fail;
	unsigned int rx_nbufq_loop_yield;
};

/**
 * enum dp_rx_refill_thread_state - enum to keep track of rx refill thread
 *				    state
 * @DP_RX_REFILL_THREAD_INVALID: initial invalid state
 * @DP_RX_REFILL_THREAD_RUNNING: rx refill thread functional (NOT suspended,
 *				 processing packets or waiting on a wait_queue)
 * @DP_RX_REFILL_THREAD_SUSPENDING: rx refill thread is suspending
 * @DP_RX_REFILL_THREAD_SUSPENDED: rx refill thread suspended
 */
enum dp_rx_refill_thread_state {
	DP_RX_REFILL_THREAD_INVALID,
	DP_RX_REFILL_THREAD_RUNNING,
	DP_RX_REFILL_THREAD_SUSPENDING,
	DP_RX_REFILL_THREAD_SUSPENDED
};

/**
 * struct dp_rx_thread - structure holding variables for a single DP RX thread
 * @id: id of the dp_rx_thread (0 to DP_MAX_RX_THREADS - 1)
 * @task: task structure corresponding to the thread
 * @start_event: handle of Event for DP Rx thread to signal startup
 * @suspend_event: handle of Event for DP Rx thread to signal suspend
 * @resume_event: handle of Event for DP Rx thread to signal resume
 * @shutdown_event: handle of Event for DP Rx thread to signal shutdown
 * @vdev_del_event: handle of Event for vdev del thread to signal completion
 *		    for gro flush
 * @gro_flush_ind: gro flush indication for DP Rx thread
 * @event_flag: event flag to post events to DP Rx thread
 * @nbuf_queue: nbuf queue used to store RX packets
 * @aff_mask: current affinity mask of the DP Rx thread
 * @stats: per thread stats
 * @rtm_handle_cmn: abstract RX TM handle. This allows access to the dp_rx_tm
 *		    structures via APIs.
 * @napi: napi to deliver packet to stack via GRO
 * @wait_q: wait queue to conditionally wait on events for DP Rx thread
 * @netdev: dummy netdev to initialize the napi structure with
 */
struct dp_rx_thread {
	uint8_t id;
	qdf_thread_t *task;
	qdf_event_t start_event;
	qdf_event_t suspend_event;
	qdf_event_t resume_event;
	qdf_event_t shutdown_event;
	qdf_event_t vdev_del_event;
	qdf_atomic_t gro_flush_ind;
	unsigned long event_flag;
	qdf_nbuf_queue_head_t nbuf_queue;
	unsigned long aff_mask;
	struct dp_rx_thread_stats stats;
	struct dp_rx_tm_handle_cmn *rtm_handle_cmn;
	qdf_napi_struct napi;
	qdf_wait_queue_head_t wait_q;
	qdf_dummy_netdev_t netdev;
};

/**
 * struct dp_rx_refill_thread - structure holding info of DP Rx refill thread
 * @task: task structure corresponding to the thread
 * @start_event: handle of Event for DP Rx refill thread to signal startup
 * @suspend_event: handle of Event for DP Rx refill thread to signal suspend
 * @resume_event: handle of Event for DP Rx refill thread to signal resume
 * @shutdown_event: handle of Event for DP Rx refill thread to signal shutdown
 * @event_flag: event flag to post events to DP Rx refill thread
 * @wait_q: wait queue to conditionally wait on events for DP Rx refill thread
 * @enabled: flag to check whether DP Rx refill thread is enabled
 * @soc: abstract DP soc reference used in internal APIs
 * @state: state of DP Rx refill thread
 */
struct dp_rx_refill_thread {
	qdf_thread_t *task;
	qdf_event_t start_event;
	qdf_event_t suspend_event;
	qdf_event_t resume_event;
	qdf_event_t shutdown_event;
	unsigned long event_flag;
	qdf_wait_queue_head_t wait_q;
	bool enabled;
	void *soc;
	enum dp_rx_refill_thread_state state;
};

/**
 * enum dp_rx_thread_state - enum to keep track of the state of the rx threads
 * @DP_RX_THREADS_INVALID: initial invalid state
 * @DP_RX_THREADS_RUNNING: rx threads functional (NOT suspended, processing
 *			   packets or waiting on a wait_queue)
 * @DP_RX_THREADS_SUSPENDING: rx threads are suspending
 * @DP_RX_THREADS_SUSPENDED: rx threads suspended from cfg80211 suspend
 */
enum dp_rx_thread_state {
	DP_RX_THREADS_INVALID,
	DP_RX_THREADS_RUNNING,
	DP_RX_THREADS_SUSPENDING,
	DP_RX_THREADS_SUSPENDED
};

/**
 * struct dp_rx_tm_handle - DP RX thread infrastructure handle
 * @num_dp_rx_threads: number of DP RX threads initialized
 * @txrx_handle_cmn: opaque txrx handle to get to pdev and soc
 * @state: state of the rx_threads. All of them should be in the same state.
 * @rx_thread: array of pointers of type struct dp_rx_thread
 * @allow_dropping: flag to indicate that frame dropping is enabled
 */
struct dp_rx_tm_handle {
	uint8_t num_dp_rx_threads;
	struct dp_txrx_handle_cmn *txrx_handle_cmn;
	enum dp_rx_thread_state state;
	struct dp_rx_thread **rx_thread;
	qdf_atomic_t allow_dropping;
};

/**
 * enum dp_rx_gro_flush_code - enum to differentiate the different GRO flushes
 * @DP_RX_GRO_NOT_FLUSH: no flush indication
 * @DP_RX_GRO_NORMAL_FLUSH: regular full flush
 * @DP_RX_GRO_LOW_TPUT_FLUSH: flush during low tput level
 */
enum dp_rx_gro_flush_code {
	DP_RX_GRO_NOT_FLUSH = 0,
	DP_RX_GRO_NORMAL_FLUSH,
	DP_RX_GRO_LOW_TPUT_FLUSH
};

/**
 * struct dp_txrx_config - dp txrx configuration passed to dp txrx modules
 * @enable_rx_threads: whether DP rx threads are enabled or not
 */
struct dp_txrx_config {
	bool enable_rx_threads;
};

struct dp_txrx_handle_cmn;

/**
 * struct dp_txrx_handle - main dp txrx container handle
 * @soc: ol_txrx_soc_handle soc handle
 * @pdev: cdp_pdev pdev handle
 * @rx_tm_hdl: rx thread infrastructure handle
 * @refill_thread: rx refill thread infra handle
 * @config: configuration for DP TXRX modules
 */
struct dp_txrx_handle {
	ol_txrx_soc_handle soc;
	struct cdp_pdev *pdev;
	struct dp_rx_tm_handle rx_tm_hdl;
	struct dp_rx_refill_thread refill_thread;
	struct dp_txrx_config config;
};

/**
 * dp_rx_refill_thread_init() - Initialize DP Rx refill threads
 * @refill_thread: Contains overall rx refill thread info
 *
 * Return: QDF_STATUS
 */
QDF_STATUS dp_rx_refill_thread_init(struct dp_rx_refill_thread *refill_thread);

/**
 * dp_rx_refill_thread_deinit() - De-initialize DP Rx refill threads
 * @refill_thread: Contains overall rx refill thread info
 *
 * Return: QDF_STATUS
 */
QDF_STATUS
dp_rx_refill_thread_deinit(struct dp_rx_refill_thread *refill_thread);
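
/*
 * Example (illustrative sketch only, not code from this driver): the
 * expected lifecycle of the refill thread, assuming a dp_rx_refill_thread
 * embedded in the caller's container (as in struct dp_txrx_handle) with
 * @enabled and @soc already populated:
 *
 *	struct dp_rx_refill_thread *refill = &dp_ext_hdl->refill_thread;
 *
 *	if (QDF_IS_STATUS_ERROR(dp_rx_refill_thread_init(refill)))
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	dp_rx_refill_thread_suspend(refill);	(on system suspend)
 *	dp_rx_refill_thread_resume(refill);	(on system resume)
 *	...
 *	dp_rx_refill_thread_deinit(refill);	(on driver unload)
 */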

/**
 * dp_rx_tm_init() - initialize DP Rx thread infrastructure
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
 * @num_dp_rx_threads: number of DP Rx threads to be initialized
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_init(struct dp_rx_tm_handle *rx_tm_hdl,
			 uint8_t num_dp_rx_threads);

/**
 * dp_rx_tm_deinit() - de-initialize DP Rx thread infrastructure
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl);
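
/*
 * Example (illustrative sketch only): bringing the RX thread infrastructure
 * up and down, assuming the caller has already populated
 * rx_tm_hdl->txrx_handle_cmn:
 *
 *	if (QDF_IS_STATUS_ERROR(dp_rx_tm_init(rx_tm_hdl, num_dp_rx_threads)))
 *		return QDF_STATUS_E_FAILURE;
 *	...
 *	dp_rx_tm_deinit(rx_tm_hdl);
 */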

/**
 * dp_rx_tm_enqueue_pkt() - enqueue RX packet into RXTI
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
 * @nbuf_list: a single nbuf or a list of nbufs to be enqueued into RXTI
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
				qdf_nbuf_t nbuf_list);

/**
 * dp_rx_tm_gro_flush_ind() - flush GRO packets for a RX Context Id
 * @rx_tm_handle: dp_rx_tm_handle containing the overall thread infrastructure
 * @rx_ctx_id: RX Thread Context Id for which GRO flush needs to be done
 * @flush_code: flush code to differentiate low TPUT flush
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_gro_flush_ind(struct dp_rx_tm_handle *rx_tm_handle,
				  int rx_ctx_id,
				  enum dp_rx_gro_flush_code flush_code);

/**
 * dp_rx_refill_thread_suspend() - Suspend RX refill thread
 * @refill_thread: pointer to dp_rx_refill_thread object
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS
dp_rx_refill_thread_suspend(struct dp_rx_refill_thread *refill_thread);

/**
 * dp_rx_tm_suspend() - suspend all threads in RXTI
 * @rx_tm_handle: pointer to dp_rx_tm_handle object
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_handle);

/**
 * dp_rx_tm_flush_by_vdev_id() - flush rx packets by vdev_id in all
 *				 rx thread queues
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 * @vdev_id: vdev id for which packets are to be flushed
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_rx_tm_flush_by_vdev_id(struct dp_rx_tm_handle *rx_tm_hdl,
				     uint8_t vdev_id);
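
/*
 * Example (illustrative sketch only): a vdev delete path would typically
 * flush any packets still queued in the RX threads before the vdev object
 * goes away:
 *
 *	dp_rx_tm_flush_by_vdev_id(&dp_ext_hdl->rx_tm_hdl, vdev_id);
 */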

/**
 * dp_rx_refill_thread_resume() - Resume RX refill thread
 * @refill_thread: pointer to dp_rx_refill_thread
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS
dp_rx_refill_thread_resume(struct dp_rx_refill_thread *refill_thread);

/**
 * dp_rx_tm_resume() - resume all threads in RXTI
 * @rx_tm_handle: pointer to dp_rx_tm_handle object
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_resume(struct dp_rx_tm_handle *rx_tm_handle);

/**
 * dp_rx_tm_dump_stats() - dump stats for all threads in RXTI
 * @rx_tm_handle: pointer to dp_rx_tm_handle object
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_dump_stats(struct dp_rx_tm_handle *rx_tm_handle);

/**
 * dp_rx_thread_get_txrx_handle() - get txrx handle from rx_tm_handle_cmn
 * @rx_tm_handle_cmn: opaque pointer to dp_rx_tm_handle_cmn struct
 *
 * Return: pointer to dp_txrx_handle_cmn handle
 */
static inline struct dp_txrx_handle_cmn*
dp_rx_thread_get_txrx_handle(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn)
{
	return (((struct dp_rx_tm_handle *)rx_tm_handle_cmn)->txrx_handle_cmn);
}

/**
 * dp_rx_tm_get_napi_context() - get NAPI context for a RX CTX ID
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 * @rx_ctx_id: RX context ID (RX thread ID) corresponding to which NAPI is
 *             needed
 *
 * Return: NULL on failure, else pointer to NAPI corresponding to rx_ctx_id
 */
qdf_napi_struct *dp_rx_tm_get_napi_context(struct dp_rx_tm_handle *rx_tm_hdl,
					   uint8_t rx_ctx_id);

/**
 * dp_rx_tm_set_cpu_mask() - set CPU mask for RX threads
 * @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
 *             infrastructure
 * @new_mask: New CPU mask pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_rx_tm_set_cpu_mask(struct dp_rx_tm_handle *rx_tm_hdl,
				 qdf_cpu_mask *new_mask);
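
/*
 * Example (illustrative sketch only, assuming the usual qdf_cpumask_clear()
 * and qdf_cpumask_set_cpu() wrappers are available): pinning all RX threads
 * to CPUs 2 and 3:
 *
 *	qdf_cpu_mask new_mask;
 *
 *	qdf_cpumask_clear(&new_mask);
 *	qdf_cpumask_set_cpu(2, &new_mask);
 *	qdf_cpumask_set_cpu(3, &new_mask);
 *	dp_rx_tm_set_cpu_mask(rx_tm_hdl, &new_mask);
 */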

#ifdef FEATURE_WLAN_DP_RX_THREADS
/**
 * dp_txrx_get_cmn_hdl_frm_ext_hdl() - conversion func ext_hdl->txrx_handle_cmn
 * @dp_ext_hdl: pointer to dp_txrx_handle structure
 *
 * Return: typecasted pointer of type - struct dp_txrx_handle_cmn
 */
static inline struct dp_txrx_handle_cmn *
dp_txrx_get_cmn_hdl_frm_ext_hdl(struct dp_txrx_handle *dp_ext_hdl)
{
	return (struct dp_txrx_handle_cmn *)dp_ext_hdl;
}

/**
 * dp_txrx_get_ext_hdl_frm_cmn_hdl() - conversion func txrx_handle_cmn->ext_hdl
 * @txrx_cmn_hdl: pointer to dp_txrx_handle_cmn structure
 *
 * Return: typecasted pointer of type - struct dp_txrx_handle
 */
static inline struct dp_txrx_handle *
dp_txrx_get_ext_hdl_frm_cmn_hdl(struct dp_txrx_handle_cmn *txrx_cmn_hdl)
{
	return (struct dp_txrx_handle *)txrx_cmn_hdl;
}

/**
 * dp_txrx_get_soc_from_ext_handle() - get soc handle from the common handle
 * @txrx_cmn_hdl: pointer to dp_txrx_handle_cmn structure
 *
 * Return: ol_txrx_soc_handle stored in the dp_txrx_handle
 */
static inline ol_txrx_soc_handle
dp_txrx_get_soc_from_ext_handle(struct dp_txrx_handle_cmn *txrx_cmn_hdl)
{
	struct dp_txrx_handle *dp_ext_hdl;

	dp_ext_hdl = dp_txrx_get_ext_hdl_frm_cmn_hdl(txrx_cmn_hdl);

	return dp_ext_hdl->soc;
}

/**
 * dp_txrx_init() - initialize DP TXRX module
 * @soc: ol_txrx_soc_handle
 * @pdev_id: id of dp pdev handle
 * @config: configuration for DP TXRX modules
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_txrx_init(ol_txrx_soc_handle soc, uint8_t pdev_id,
			struct dp_txrx_config *config);
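
/*
 * Example (illustrative sketch only): enabling the RX thread infrastructure
 * at attach time; pdev_id 0 is assumed here for a single-pdev target:
 *
 *	struct dp_txrx_config config = {0};
 *
 *	config.enable_rx_threads = true;
 *	if (QDF_IS_STATUS_ERROR(dp_txrx_init(soc, 0, &config)))
 *		return QDF_STATUS_E_FAILURE;
 */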

/**
 * dp_txrx_deinit() - de-initialize DP TXRX module
 * @soc: ol_txrx_soc_handle
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
QDF_STATUS dp_txrx_deinit(ol_txrx_soc_handle soc);

/**
 * dp_txrx_flush_pkts_by_vdev_id() - flush rx packets for a vdev_id
 * @soc: ol_txrx_soc_handle object
 * @vdev_id: vdev_id for which rx packets are to be flushed
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
static inline QDF_STATUS dp_txrx_flush_pkts_by_vdev_id(ol_txrx_soc_handle soc,
						       uint8_t vdev_id)
{
	struct dp_txrx_handle *dp_ext_hdl;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	if (!soc) {
		qdf_status = QDF_STATUS_E_INVAL;
		goto ret;
	}

	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
	if (!dp_ext_hdl) {
		qdf_status = QDF_STATUS_E_FAULT;
		goto ret;
	}

	qdf_status = dp_rx_tm_flush_by_vdev_id(&dp_ext_hdl->rx_tm_hdl, vdev_id);
ret:
	return qdf_status;
}

/**
 * dp_txrx_resume() - resume all threads
 * @soc: ol_txrx_soc_handle object
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
static inline QDF_STATUS dp_txrx_resume(ol_txrx_soc_handle soc)
{
	struct dp_txrx_handle *dp_ext_hdl;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	struct dp_rx_refill_thread *refill_thread;

	if (!soc) {
		qdf_status = QDF_STATUS_E_INVAL;
		goto ret;
	}

	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
	if (!dp_ext_hdl) {
		qdf_status = QDF_STATUS_E_FAULT;
		goto ret;
	}

	/* Resume the refill thread (if enabled) before the RX threads */
	refill_thread = &dp_ext_hdl->refill_thread;
	if (refill_thread->enabled) {
		qdf_status = dp_rx_refill_thread_resume(refill_thread);
		if (qdf_status != QDF_STATUS_SUCCESS)
			return qdf_status;
	}

	qdf_status = dp_rx_tm_resume(&dp_ext_hdl->rx_tm_hdl);
ret:
	return qdf_status;
}

/**
 * dp_txrx_suspend() - suspend all threads
 * @soc: ol_txrx_soc_handle object
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
static inline QDF_STATUS dp_txrx_suspend(ol_txrx_soc_handle soc)
{
	struct dp_txrx_handle *dp_ext_hdl;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
	struct dp_rx_refill_thread *refill_thread;

	if (!soc) {
		qdf_status = QDF_STATUS_E_INVAL;
		goto ret;
	}

	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
	if (!dp_ext_hdl) {
		qdf_status = QDF_STATUS_E_FAULT;
		goto ret;
	}

	refill_thread = &dp_ext_hdl->refill_thread;
	if (refill_thread->enabled) {
		qdf_status = dp_rx_refill_thread_suspend(refill_thread);
		if (qdf_status != QDF_STATUS_SUCCESS)
			return qdf_status;
	}

	qdf_status = dp_rx_tm_suspend(&dp_ext_hdl->rx_tm_hdl);
	/* Roll back the refill thread suspend if the RX threads failed to
	 * suspend, so the system is not left half-suspended.
	 */
	if (QDF_IS_STATUS_ERROR(qdf_status) && refill_thread->enabled)
		dp_rx_refill_thread_resume(refill_thread);

ret:
	return qdf_status;
}

/**
 * dp_rx_enqueue_pkt() - enqueue packet(s) into the thread
 * @soc: ol_txrx_soc_handle object
 * @nbuf_list: list of packets to be queued into the rx_thread
 *
 * The function accepts a list of skbs connected by the skb->next pointer and
 * queues them into a RX thread to be sent to the stack.
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
static inline
QDF_STATUS dp_rx_enqueue_pkt(ol_txrx_soc_handle soc, qdf_nbuf_t nbuf_list)
{
	struct dp_txrx_handle *dp_ext_hdl;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	if (!soc || !nbuf_list) {
		qdf_status = QDF_STATUS_E_INVAL;
		dp_err("invalid input params soc %pK nbuf %pK",
		       soc, nbuf_list);
		goto ret;
	}

	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
	if (!dp_ext_hdl) {
		qdf_status = QDF_STATUS_E_FAULT;
		goto ret;
	}

	qdf_status = dp_rx_tm_enqueue_pkt(&dp_ext_hdl->rx_tm_hdl, nbuf_list);
ret:
	return qdf_status;
}
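
/*
 * Example (illustrative sketch only): chaining two nbufs via the next
 * pointer before handing them off to a RX thread; qdf_nbuf_set_next() is
 * the qdf wrapper for linking skb->next:
 *
 *	qdf_nbuf_set_next(nbuf_head, nbuf_tail);
 *	qdf_nbuf_set_next(nbuf_tail, NULL);
 *	qdf_status = dp_rx_enqueue_pkt(soc, nbuf_head);
 */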

/**
 * dp_rx_gro_flush_ind() - Flush GRO packets for a given RX CTX Id
 * @soc: ol_txrx_soc_handle object
 * @rx_ctx_id: Context Id (Thread for which GRO packets need to be flushed)
 * @flush_code: flush_code differentiating normal_flush from low_tput_flush
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
static inline
QDF_STATUS dp_rx_gro_flush_ind(ol_txrx_soc_handle soc, int rx_ctx_id,
			       enum dp_rx_gro_flush_code flush_code)
{
	struct dp_txrx_handle *dp_ext_hdl;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	if (!soc) {
		qdf_status = QDF_STATUS_E_INVAL;
		dp_err("invalid input param soc %pK", soc);
		goto ret;
	}

	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
	if (!dp_ext_hdl) {
		qdf_status = QDF_STATUS_E_FAULT;
		goto ret;
	}

	qdf_status = dp_rx_tm_gro_flush_ind(&dp_ext_hdl->rx_tm_hdl, rx_ctx_id,
					    flush_code);
ret:
	return qdf_status;
}
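
/*
 * Example (illustrative sketch only): requesting a regular GRO flush on the
 * RX thread serving context 0:
 *
 *	dp_rx_gro_flush_ind(soc, 0, DP_RX_GRO_NORMAL_FLUSH);
 */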

/**
 * dp_txrx_ext_dump_stats() - dump txrx external module stats
 * @soc: ol_txrx_soc_handle object
 * @stats_id: id for the module whose stats are needed
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
static inline QDF_STATUS dp_txrx_ext_dump_stats(ol_txrx_soc_handle soc,
						uint8_t stats_id)
{
	struct dp_txrx_handle *dp_ext_hdl;
	QDF_STATUS qdf_status;

	if (!soc) {
		dp_err("invalid input params soc %pK", soc);
		return QDF_STATUS_E_INVAL;
	}

	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
	if (!dp_ext_hdl)
		return QDF_STATUS_E_FAULT;

	if (stats_id == CDP_DP_RX_THREAD_STATS)
		qdf_status = dp_rx_tm_dump_stats(&dp_ext_hdl->rx_tm_hdl);
	else
		qdf_status = QDF_STATUS_E_INVAL;

	return qdf_status;
}

/**
 * dp_rx_get_napi_context() - get NAPI context for a RX CTX ID
 * @soc: ol_txrx_soc_handle object
 * @rx_ctx_id: RX context ID (RX thread ID) corresponding to which NAPI is
 *             needed
 *
 * Return: NULL on failure, else pointer to NAPI corresponding to rx_ctx_id
 */
static inline
qdf_napi_struct *dp_rx_get_napi_context(ol_txrx_soc_handle soc,
					uint8_t rx_ctx_id)
{
	struct dp_txrx_handle *dp_ext_hdl;

	if (!soc) {
		dp_err("soc is NULL!");
		return NULL;
	}

	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
	if (!dp_ext_hdl) {
		dp_err("dp_ext_hdl is NULL!");
		return NULL;
	}

	return dp_rx_tm_get_napi_context(&dp_ext_hdl->rx_tm_hdl, rx_ctx_id);
}

/**
 * dp_txrx_set_cpu_mask() - set CPU mask for RX threads
 * @soc: ol_txrx_soc_handle object
 * @new_mask: New CPU mask pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
 */
static inline
QDF_STATUS dp_txrx_set_cpu_mask(ol_txrx_soc_handle soc, qdf_cpu_mask *new_mask)
{
	struct dp_txrx_handle *dp_ext_hdl;
	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

	if (!soc) {
		qdf_status = QDF_STATUS_E_INVAL;
		goto ret;
	}

	dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
	if (!dp_ext_hdl) {
		qdf_status = QDF_STATUS_E_FAULT;
		goto ret;
	}

	qdf_status = dp_rx_tm_set_cpu_mask(&dp_ext_hdl->rx_tm_hdl, new_mask);

ret:
	return qdf_status;
}

#else

static inline
QDF_STATUS dp_txrx_init(ol_txrx_soc_handle soc, uint8_t pdev_id,
			struct dp_txrx_config *config)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_txrx_deinit(ol_txrx_soc_handle soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_txrx_flush_pkts_by_vdev_id(ol_txrx_soc_handle soc,
						       uint8_t vdev_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_txrx_resume(ol_txrx_soc_handle soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_txrx_suspend(ol_txrx_soc_handle soc)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_rx_enqueue_pkt(ol_txrx_soc_handle soc, qdf_nbuf_t nbuf_list)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_rx_gro_flush_ind(ol_txrx_soc_handle soc, int rx_ctx_id,
			       enum dp_rx_gro_flush_code flush_code)
{
	return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_txrx_ext_dump_stats(ol_txrx_soc_handle soc,
						uint8_t stats_id)
{
	return QDF_STATUS_SUCCESS;
}

static inline
qdf_napi_struct *dp_rx_get_napi_context(ol_txrx_soc_handle soc,
					uint8_t rx_ctx_id)
{
	return NULL;
}

static inline
QDF_STATUS dp_txrx_set_cpu_mask(ol_txrx_soc_handle soc, qdf_cpu_mask *new_mask)
{
	return QDF_STATUS_SUCCESS;
}

#endif /* FEATURE_WLAN_DP_RX_THREADS */

/**
 * dp_rx_tm_get_pending() - get the number of frames pending in the thread
 *			    nbuf queues
 * @soc: ol_txrx_soc_handle object
 *
 * Return: number of frames
 */
int dp_rx_tm_get_pending(ol_txrx_soc_handle soc);
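
/*
 * Example (illustrative sketch only): a suspend path could poll the pending
 * count to decide whether the RX threads have drained their queues:
 *
 *	if (dp_rx_tm_get_pending(soc))
 *		qdf_status = QDF_STATUS_E_AGAIN;
 */
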
#endif /* __WLAN_DP_RX_THREAD_H */