xref: /wlan-driver/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - work item used to reschedule a CE tasklet
 *
 * @id: copy engine ID the work item belongs to
 * @data: opaque HIF context pointer
 * @reg_work: work struct that reschedules the CE tasklet
 */
struct tasklet_work {
	enum ce_id_type id;
	void *data;
	qdf_work_t reg_work;
};

/**
 * ce_tasklet_schedule() - schedule CE tasklet
 * @tasklet_entry: ce tasklet entry
 *
 * Return: None
 */
static inline void ce_tasklet_schedule(struct ce_tasklet_entry *tasklet_entry)
{
	if (tasklet_entry->hi_tasklet_ce)
		tasklet_hi_schedule(&tasklet_entry->intr_tq);
	else
		tasklet_schedule(&tasklet_entry->intr_tq);
}

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
	qdf_work_t *reg_work = qdf_container_of(work, qdf_work_t, work);
	struct tasklet_work *ce_work = qdf_container_of(reg_work,
							struct tasklet_work,
							reg_work);
	struct hif_softc *scn = ce_work->data;
	struct HIF_CE_state *hif_ce_state;

	if (!scn) {
		hif_err("tasklet scn is null");
		return;
	}

	hif_ce_state = HIF_GET_CE_STATE(scn);

	if (scn->hif_init_done == false) {
		hif_err("wlan driver is unloaded");
		return;
	}
	if (hif_ce_state->tasklets[ce_work->id].inited)
		ce_tasklet_schedule(&hif_ce_state->tasklets[ce_work->id]);
}

static struct tasklet_work tasklet_workers[CE_ID_MAX];

/**
 * init_tasklet_work() - initialize a tasklet work item
 * @work: struct work_struct
 * @work_handler: work_handler
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
			      work_func_t work_handler)
{
	INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_worker_by_ceid() - initialize the tasklet worker for a CE
 * @scn: HIF Context
 * @ce_id: copy engine ID
 *
 * Return: N/A
 */
void init_tasklet_worker_by_ceid(struct hif_opaque_softc *scn, int ce_id)
{
	tasklet_workers[ce_id].id = ce_id;
	tasklet_workers[ce_id].data = scn;
	init_tasklet_work(&tasklet_workers[ce_id].reg_work.work,
			  reschedule_ce_tasklet_work_handler);
}

/**
 * deinit_tasklet_workers() - cancel all tasklet worker items
 * @scn: HIF Context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
	u32 id;

	for (id = 0; id < CE_ID_MAX; id++)
		qdf_cancel_work(&tasklet_workers[id].reg_work);
}

#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution
 *                                      entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled
 *                                       entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
					qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - Display ce latency information
 * @hif_ctx: hif_softc struct
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
	uint8_t i, j;
	uint32_t index, start_index;
	uint64_t secs, usecs;
	static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1  -  2",
					       "2  -  5", "5  - 10", "  >  10"};
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
	struct ce_stats *stats = &hif_ce_state->stats;

	hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				       stats->ce_tasklet_exec_last_update[i][j],
				       &secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update:% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_exec_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
		for (j = 0; j < CE_BUCKET_MAX; j++) {
			qdf_log_timestamp_to_secs(
				      stats->ce_tasklet_sched_last_update[i][j],
				      &secs, &usecs);
			hif_nofl_err("\t Bucket %sms :%llu\t last update :% 8lld.%06lld",
				     buck_str[j],
				     stats->ce_tasklet_sched_bucket[i][j],
				     secs, usecs);
		}

		hif_nofl_err("\n\t\t CE RING %d Last %d time records",
			     i, HIF_REQUESTED_EVENTS);
		index = stats->record_index[i];
		start_index = stats->record_index[i];

		for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
			hif_nofl_err("\tExecution time: %lluus Total Scheduled time: %lluus",
				     stats->tasklet_exec_time_record[i][index],
				     stats->tasklet_sched_time_record[i][index]);
			if (index)
				index = (index - 1) % HIF_REQUESTED_EVENTS;
			else
				index = HIF_REQUESTED_EVENTS - 1;
			if (index == start_index)
				break;
		}
	}
}
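
/*
 * Note on the record loop above: it walks the per-CE time-record ring
 * backwards, starting at record_index[i] (the most recently written slot),
 * wrapping from slot 0 back to HIF_REQUESTED_EVENTS - 1, and stopping once
 * it returns to the starting slot, so entries are printed newest first.
 */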

/**
 * ce_tasklet_update_bucket() - update ce execution and scheduled time latency
 *                              in corresponding time buckets
 * @hif_ce_state: HIF CE state
 * @ce_id: ce_id_type
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
	uint32_t index;
	uint64_t exec_time, exec_ms;
	uint64_t sched_time, sched_ms;
	uint64_t curr_time = qdf_get_log_timestamp_usecs();
	struct ce_stats *stats = &hif_ce_state->stats;

	exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
	sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
		      (stats->tasklet_sched_entry_ts[ce_id]);

	index = stats->record_index[ce_id];
	index = (index + 1) % HIF_REQUESTED_EVENTS;

	stats->tasklet_exec_time_record[ce_id][index] = exec_time;
	stats->tasklet_sched_time_record[ce_id][index] = sched_time;
	stats->record_index[ce_id] = index;

	exec_ms = qdf_do_div(exec_time, 1000);
	sched_ms = qdf_do_div(sched_time, 1000);

	if (exec_ms > 10) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (exec_ms > 5) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (exec_ms > 2) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (exec_ms > 1) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (exec_time > 500) {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}

	if (sched_ms > 10) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
								= curr_time;
	} else if (sched_ms > 5) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
								= curr_time;
	} else if (sched_ms > 2) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
								= curr_time;
	} else if (sched_ms > 1) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
								= curr_time;
	} else if (sched_time > 500) {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
								= curr_time;
	} else {
		stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
		stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
								= curr_time;
	}
}
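
/*
 * Worked example of the bucketing above (illustrative only): an exec_time
 * of 400 us gives exec_ms = 0 and exec_time <= 500, so it is counted in
 * CE_BUCKET_500_US; 800 us gives exec_ms = 0 but exec_time > 500, so it
 * lands in CE_BUCKET_1_MS; 7000 us gives exec_ms = 7 (> 5), so it lands in
 * CE_BUCKET_10_MS. Because exec_ms comes from integer division, values
 * just past a boundary can be counted in the lower bucket.
 */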
#else
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
				     uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /* CE_TASKLET_DEBUG_ENABLE */

#if defined(CE_TASKLET_DEBUG_ENABLE) && defined(CE_TASKLET_SCHEDULE_ON_FULL)
/**
 * hif_reset_ce_full_count() - Reset ce full count
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	hif_ce_state->stats.ce_ring_full_count[ce_id] = 0;
}
#else
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

#ifdef CUSTOM_CB_SCHEDULER_SUPPORT
/**
 * ce_get_custom_cb_pending() - Helper API to check whether the custom
 * callback is pending
 * @CE_state: Pointer to CE state
 *
 * Return: true if a custom callback was pending and has been claimed
 */
static bool
ce_get_custom_cb_pending(struct CE_state *CE_state)
{
	return (qdf_atomic_dec_if_positive(&CE_state->custom_cb_pending) >= 0);
}

/**
 * ce_execute_custom_cb() - Helper API to execute custom callback
 * @CE_state: Pointer to CE state
 *
 * Return: void
 */
static void
ce_execute_custom_cb(struct CE_state *CE_state)
{
	while (ce_get_custom_cb_pending(CE_state) && CE_state->custom_cb &&
	       CE_state->custom_cb_context)
		CE_state->custom_cb(CE_state->custom_cb_context);
}
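
/*
 * qdf_atomic_dec_if_positive() decrements custom_cb_pending only while it
 * is still positive and returns a negative value otherwise, so the loop in
 * ce_execute_custom_cb() runs the registered callback once per pending
 * request and stops once the counter reaches zero.
 */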
#else
/**
 * ce_execute_custom_cb() - Helper API to execute custom callback
 * @CE_state: Pointer to CE state
 *
 * Return: void
 */
static void
ce_execute_custom_cb(struct CE_state *CE_state)
{
}
#endif /* CUSTOM_CB_SCHEDULER_SUPPORT */

/**
 * ce_tasklet() - tasklet entry point for copy engine processing
 * @data: opaque pointer to the struct ce_tasklet_entry for this CE
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
	struct ce_tasklet_entry *tasklet_entry =
		(struct ce_tasklet_entry *)data;
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
				 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id);

	hif_tasklet_latency_record_exec(scn, tasklet_entry->ce_id);

	if (qdf_atomic_read(&scn->link_suspended)) {
		hif_err("ce %d tasklet fired after link suspend",
			tasklet_entry->ce_id);
		QDF_BUG(0);
	}

	ce_execute_custom_cb(CE_state);

	ce_per_engine_service(scn, tasklet_entry->ce_id);

	if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
		/*
		 * There are frames pending, so schedule the tasklet to process
		 * them. The interrupt is re-enabled only when there are no
		 * pending frames in any of the Copy Engine pipes.
		 */
		if (test_bit(TASKLET_STATE_SCHED,
			     &tasklet_entry->intr_tq.state)) {
			hif_info("ce_id%d tasklet was scheduled, return",
				 tasklet_entry->ce_id);
			qdf_atomic_dec(&scn->active_tasklet_cnt);
			return;
		}

		hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
					 HIF_CE_TASKLET_RESCHEDULE,
					 NULL, NULL, -1, 0);

		ce_tasklet_schedule(tasklet_entry);
		hif_tasklet_latency_record_sched(scn, tasklet_entry->ce_id);

		hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
		if (scn->ce_latency_stats) {
			ce_tasklet_update_bucket(hif_ce_state,
						 tasklet_entry->ce_id);
			hif_record_tasklet_sched_entry_ts(scn,
							  tasklet_entry->ce_id);
		}
		return;
	}

	hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
				NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id);

	if ((scn->target_status != TARGET_STATUS_RESET) &&
	    !scn->free_irq_done)
		hif_irq_enable(scn, tasklet_entry->ce_id);

	qdf_atomic_dec(&scn->active_tasklet_cnt);
}
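
/*
 * Exit paths of ce_tasklet(): when ce_check_rx_pending() reports more work
 * and the entry is still inited, the tasklet normally reschedules itself
 * and returns with the CE interrupt still disabled and active_tasklet_cnt
 * still elevated; otherwise the interrupt is re-enabled (unless the target
 * is in reset or the irqs were already freed) and the active tasklet count
 * is decremented.
 */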

/**
 * ce_tasklet_init() - initialize the CE tasklets selected by mask
 * @hif_ce_state: hif_ce_state
 * @mask: bitmap of copy engine IDs whose tasklets should be initialized
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int i;
	struct CE_attr *attr;

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (mask & (1 << i)) {
			hif_ce_state->tasklets[i].ce_id = i;
			hif_ce_state->tasklets[i].inited = true;
			hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;

			attr = &hif_ce_state->host_ce_config[i];
			if (attr->flags & CE_ATTR_HI_TASKLET)
				hif_ce_state->tasklets[i].hi_tasklet_ce = true;
			else
				hif_ce_state->tasklets[i].hi_tasklet_ce = false;

			tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
				ce_tasklet,
				(unsigned long)&hif_ce_state->tasklets[i]);
		}
	}
}
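
/*
 * For example, ce_tasklet_init(hif_ce_state, (1 << 0) | (1 << 3)) sets up
 * the tasklets for copy engines 0 and 3 only; entries whose bit is clear in
 * the mask are left untouched. An engine whose host CE configuration has
 * CE_ATTR_HI_TASKLET set is later scheduled with tasklet_hi_schedule()
 * rather than tasklet_schedule().
 */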
/**
 * ce_tasklet_kill() - kill all initialized CE tasklets
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

	for (i = 0; i < CE_COUNT_MAX; i++) {
		if (hif_ce_state->tasklets[i].inited) {
			hif_ce_state->tasklets[i].inited = false;
			/*
			 * Cancel the tasklet work before tasklet_kill() to
			 * avoid a race between tasklet_schedule() and
			 * tasklet_kill(). Here qdf_cancel_work() does not
			 * return before reschedule_ce_tasklet_work_handler()
			 * completes, and even if a tasklet_schedule() still
			 * slips in, tasklet_kill() takes care of it.
			 */
			qdf_cancel_work(&tasklet_workers[i].reg_work);
			tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
		}
	}
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

/**
 * ce_tasklet_entry_dump() - dump tasklet entries info
 * @hif_ce_state: ce state
 *
 * This function will dump all tasklet entries info
 *
 * Return: None
 */
static void ce_tasklet_entry_dump(struct HIF_CE_state *hif_ce_state)
{
	struct ce_tasklet_entry *tasklet_entry;
	int i;

	if (hif_ce_state) {
		for (i = 0; i < CE_COUNT_MAX; i++) {
			tasklet_entry = &hif_ce_state->tasklets[i];

			hif_info("%02d: ce_id=%d, inited=%d, hi_tasklet_ce=%d hif_ce_state=%pK",
				 i,
				 tasklet_entry->ce_id,
				 tasklet_entry->inited,
				 tasklet_entry->hi_tasklet_ce,
				 tasklet_entry->hif_ce_state);
		}
	}
}

#define HIF_CE_DRAIN_WAIT_CNT          20
/**
 * hif_drain_tasklets() - wait until no CE tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *   -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
	uint32_t ce_drain_wait_cnt = 0;
	int32_t tasklet_cnt;

	while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
		if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
			hif_err("CE still not done with access: %d",
				tasklet_cnt);

			return -EFAULT;
		}
		hif_info("Waiting for CE to finish access");
		msleep(10);
	}
	return 0;
}
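
/*
 * With HIF_CE_DRAIN_WAIT_CNT at 20 and a 10 ms sleep per iteration, the
 * drain loop above waits roughly 200 ms at most for active_tasklet_cnt to
 * reach zero before giving up with -EFAULT.
 */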

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - Tests if an irq on the given copy engine
 *	should trigger a unit-test resume.
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	int errno;
	uint8_t wake_ce_id;

	if (!hif_is_ut_suspended(scn))
		return false;

	/* ensure passed ce_id matches wake ce_id */
	errno = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (errno) {
		hif_err("Failed to get wake CE Id: %d", errno);
		return false;
	}

	return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
	return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - top-half IRQ handler for SNOC CE interrupts
 * @irq: irq coming from kernel
 * @context: struct ce_tasklet_entry registered for this irq
 *
 * Return: IRQ_HANDLED or IRQ_NONE from ce_dispatch_interrupt()
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;
	struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

	return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
				     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
	int cpu_id = qdf_get_cpu();

	hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ctx: HIF context
 *
 * Return: none
 */
void hif_display_ce_stats(struct hif_softc *hif_ctx)
{
#define STR_SIZE 128
	uint8_t i, j, pos;
	char str_buffer[STR_SIZE];
	int size, ret;
	struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);

	qdf_debug("CE interrupt statistics:");
	for (i = 0; i < CE_COUNT_MAX; i++) {
		size = STR_SIZE;
		pos = 0;
		for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
			ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
				       j, hif_ce_state->stats.ce_per_cpu[i][j]);
			if (ret <= 0 || ret >= size)
				break;
			size -= ret;
			pos += ret;
		}
		qdf_debug("CE id[%2d] - %s", i, str_buffer);
	}

	if (hif_ctx->ce_latency_stats)
		hif_ce_latency_stats(hif_ctx);
#undef STR_SIZE
}
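
/*
 * The per-CPU counters are packed into one log line per CE above; if the
 * 128-byte str_buffer fills up, snprintf() returns a value no smaller than
 * the remaining size and the inner loop stops early instead of overflowing
 * the buffer.
 */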

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
	qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

#ifdef WLAN_TRACEPOINTS
/**
 * hif_set_ce_tasklet_sched_time() - Set tasklet schedule time for
 *  CE with matching ce_id
 * @scn: hif context
 * @ce_id: CE id
 *
 * Return: None
 */
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
	struct CE_state *ce_state = scn->ce_id_to_state[ce_id];

	ce_state->ce_tasklet_sched_time = qdf_time_sched_clock();
}
#else
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
					struct ce_tasklet_entry *tasklet_entry)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
		hif_debug("tasklet scheduled, return");
		qdf_atomic_dec(&scn->active_tasklet_cnt);
		return false;
	}

	hif_set_ce_tasklet_sched_time(scn, tasklet_entry->ce_id);
	/* Record the schedule latency before tasklet_schedule(); on whunt
	 * the tasklet may run before hif_tasklet_schedule() has finished.
	 */
	hif_tasklet_latency_record_sched(scn, tasklet_entry->ce_id);
	ce_tasklet_schedule(tasklet_entry);

	hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
	if (scn->ce_latency_stats)
		hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);

	return true;
}

#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
#define CE_LOOP_MAX_COUNT	20
/**
 * ce_poll_reap_by_id() - reap the available frames from CE by polling per ce_id
 * @scn: hif context
 * @ce_id: CE id
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success, -EBUSY if frames are still pending after the poll
 *   limit is reached
 */
static int ce_poll_reap_by_id(struct hif_softc *scn, enum ce_id_type ce_id)
{
	struct HIF_CE_state *hif_ce_state = (struct HIF_CE_state *)scn;
	struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
	int i;

	if (scn->ce_latency_stats)
		hif_record_tasklet_exec_entry_ts(scn, ce_id);

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
				 NULL, NULL, -1, 0);

	for (i = 0; i < CE_LOOP_MAX_COUNT; i++) {
		ce_per_engine_service(scn, ce_id);

		if (ce_check_rx_pending(CE_state))
			hif_record_ce_desc_event(scn, ce_id,
						 HIF_CE_TASKLET_REAP_REPOLL,
						 NULL, NULL, -1, 0);
		else
			break;
	}

	/*
	 * In the unlikely case that frames are still pending to be reaped,
	 * this could loop forever, so return -EBUSY instead.
	 */
	if (ce_check_rx_pending(CE_state) &&
	    i == CE_LOOP_MAX_COUNT)
		return -EBUSY;

	hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
				 NULL, NULL, -1, 0);

	if (scn->ce_latency_stats)
		ce_tasklet_update_bucket(hif_ce_state, ce_id);

	return 0;
}
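
/*
 * The reap loop above is bounded: at most CE_LOOP_MAX_COUNT (20) passes of
 * ce_per_engine_service() are attempted, after which any still-pending RX
 * entries make the function give up with -EBUSY rather than spin forever.
 */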

/**
 * hif_drain_fw_diag_ce() - reap all the available FW diag logs from CE
 * @scn: hif context
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success, -EBUSY if the CE tasklet is already running or
 *   frames are still pending after the poll limit is reached
 */
int hif_drain_fw_diag_ce(struct hif_softc *scn)
{
	uint8_t ce_id;
	struct HIF_CE_state *hif_ce_state = (struct HIF_CE_state *)scn;
	struct ce_tasklet_entry *tasklet_entry;

	if (hif_get_fw_diag_ce_id(scn, &ce_id))
		return 0;

	tasklet_entry = &hif_ce_state->tasklets[ce_id];

	/* If the CE7 tasklet is already triggered, there is no need to poll
	 * the CE explicitly; the CE7 softirq will reschedule itself until
	 * there are no pending entries.
	 */
	if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state) ||
	    test_bit(TASKLET_STATE_RUN, &tasklet_entry->intr_tq.state))
		return -EBUSY;

	return ce_poll_reap_by_id(scn, ce_id);
}
#endif

#ifdef CE_TASKLET_SCHEDULE_ON_FULL
static inline int ce_check_tasklet_status(int ce_id,
					  struct ce_tasklet_entry *entry)
{
	struct HIF_CE_state *hif_ce_state = entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

	if (hif_napi_enabled(hif_hdl, ce_id)) {
		struct qca_napi_info *napi;

		napi = scn->napi_data.napis[ce_id];
		if (test_bit(NAPI_STATE_SCHED, &napi->napi.state))
			return -EBUSY;
	} else {
		if (test_bit(TASKLET_STATE_SCHED,
			     &hif_ce_state->tasklets[ce_id].intr_tq.state))
			return -EBUSY;
	}
	return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
	qdf_spin_lock_irqsave(&ce_state->ce_interrupt_lock);
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
	qdf_spin_unlock_irqrestore(&ce_state->ce_interrupt_lock);
}
#else
static inline int ce_check_tasklet_status(int ce_id,
					  struct ce_tasklet_entry *entry)
{
	return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
}
#endif

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt was dispatched, IRQ_NONE otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
				  struct ce_tasklet_entry *tasklet_entry)
{
	struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct CE_state *ce_state;

	if (tasklet_entry->ce_id != ce_id) {
		bool rl;

		rl = hif_err_rl("ce_id (expect %d, received %d) does not match, inited=%d, ce_count=%u",
				tasklet_entry->ce_id, ce_id,
				tasklet_entry->inited,
				scn->ce_count);

		if (!rl)
			ce_tasklet_entry_dump(hif_ce_state);

		return IRQ_NONE;
	}
	if (unlikely(ce_id >= CE_COUNT_MAX)) {
		hif_err("ce_id=%d > CE_COUNT_MAX=%d",
			tasklet_entry->ce_id, CE_COUNT_MAX);
		return IRQ_NONE;
	}

	ce_state = scn->ce_id_to_state[ce_id];

	ce_interrupt_lock(ce_state);
	if (ce_check_tasklet_status(ce_id, tasklet_entry)) {
		ce_interrupt_unlock(ce_state);
		return IRQ_NONE;
	}

	if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
		ce_interrupt_unlock(ce_state);
		return IRQ_HANDLED;
	}

	hif_irq_disable(scn, ce_id);

	hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
				NULL, NULL, 0, 0);
	hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

	if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
		hif_ut_fw_resume(scn);
		hif_irq_enable(scn, ce_id);
		ce_interrupt_unlock(ce_state);
		return IRQ_HANDLED;
	}

	qdf_atomic_inc(&scn->active_tasklet_cnt);

	if (hif_napi_enabled(hif_hdl, ce_id))
		hif_napi_schedule(hif_hdl, ce_id);
	else
		hif_tasklet_schedule(hif_hdl, tasklet_entry);

	ce_interrupt_unlock(ce_state);

	return IRQ_HANDLED;
}
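
/*
 * Dispatch summary: once the CE id is validated and the per-CE interrupt
 * lock is held, the CE interrupt is disabled and either the NAPI instance
 * (when NAPI is enabled for this CE) or the CE tasklet is scheduled; the
 * scheduled bottom half is then responsible for re-enabling the interrupt
 * when it finishes (see ce_tasklet() above).
 */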

const char *ce_name[CE_COUNT_MAX] = {
	"WLAN_CE_0",
	"WLAN_CE_1",
	"WLAN_CE_2",
	"WLAN_CE_3",
	"WLAN_CE_4",
	"WLAN_CE_5",
	"WLAN_CE_6",
	"WLAN_CE_7",
	"WLAN_CE_8",
	"WLAN_CE_9",
	"WLAN_CE_10",
	"WLAN_CE_11",
#ifdef QCA_WIFI_QCN9224
	"WLAN_CE_12",
	"WLAN_CE_13",
	"WLAN_CE_14",
	"WLAN_CE_15",
#endif
};
/**
 * ce_unregister_irq() - unregister copy engine irqs selected by mask
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask.  If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	struct hif_softc *scn;

	if (!hif_ce_state) {
		hif_warn("hif_ce_state = NULL");
		return QDF_STATUS_SUCCESS;
	}

	scn = HIF_GET_SOFTC(hif_ce_state);
	ce_count = scn->ce_count;
	/* we are removing interrupts, so better stop NAPI */
	ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
			     NAPI_EVT_INT_STATE, (void *)0);
	if (ret != 0)
		hif_err("napi_event INT_STATE returned %d", ret);
	/* this is not fatal, continue */

	/* filter mask to free only for ce's with irq registered */
	mask &= hif_ce_state->ce_register_irq_done;
	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
					&hif_ce_state->tasklets[id]);
			if (ret < 0)
				hif_err(
					"pld_unregister_irq error - ce_id = %d, ret = %d",
					id, ret);
		}
		ce_disable_polling(scn->ce_id_to_state[id]);
	}
	hif_ce_state->ce_register_irq_done &= ~mask;

	return QDF_STATUS_SUCCESS;
}
/**
 * ce_register_irq() - register copy engine irqs selected by mask
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask.  If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
	int id;
	int ce_count;
	int ret;
	unsigned long irqflags = IRQF_TRIGGER_RISING;
	uint32_t done_mask = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

	ce_count = scn->ce_count;

	for (id = 0; id < ce_count; id++) {
		if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
			ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
				hif_snoc_interrupt_handler,
				irqflags, ce_name[id],
				&hif_ce_state->tasklets[id]);
			if (ret) {
				hif_err(
					"cannot register CE %d irq handler, ret = %d",
					id, ret);
				ce_unregister_irq(hif_ce_state, done_mask);
				return QDF_STATUS_E_FAULT;
			}
			done_mask |= 1 << id;
		}
	}
	hif_ce_state->ce_register_irq_done |= done_mask;

	return QDF_STATUS_SUCCESS;
}