/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include "qdf_lock.h"
#include "qdf_types.h"
#include "qdf_status.h"
#include "regtable.h"
#include "hif.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_reg.h"
#include "ce_internal.h"
#include "ce_tasklet.h"
#include "pld_common.h"
#include "hif_debug.h"
#include "hif_napi.h"

/**
 * struct tasklet_work - per-CE deferred work context
 * @id: copy engine id
 * @data: opaque HIF context passed to the work handler
 * @reg_work: deferred work used to reschedule the CE tasklet
 */
struct tasklet_work {
        enum ce_id_type id;
        void *data;
        qdf_work_t reg_work;
};

/**
 * ce_tasklet_schedule() - schedule CE tasklet
 * @tasklet_entry: ce tasklet entry
 *
 * Return: None
 */
static inline void ce_tasklet_schedule(struct ce_tasklet_entry *tasklet_entry)
{
        if (tasklet_entry->hi_tasklet_ce)
                tasklet_hi_schedule(&tasklet_entry->intr_tq);
        else
                tasklet_schedule(&tasklet_entry->intr_tq);
}

/**
 * reschedule_ce_tasklet_work_handler() - reschedule work
 * @work: struct work_struct
 *
 * Return: N/A
 */
static void reschedule_ce_tasklet_work_handler(struct work_struct *work)
{
        qdf_work_t *reg_work = qdf_container_of(work, qdf_work_t, work);
        struct tasklet_work *ce_work = qdf_container_of(reg_work,
                                                        struct tasklet_work,
                                                        reg_work);
        struct hif_softc *scn = ce_work->data;
        struct HIF_CE_state *hif_ce_state;

        if (!scn) {
                hif_err("tasklet scn is null");
                return;
        }

        hif_ce_state = HIF_GET_CE_STATE(scn);

        if (scn->hif_init_done == false) {
                hif_err("wlan driver is unloaded");
                return;
        }
        if (hif_ce_state->tasklets[ce_work->id].inited)
                ce_tasklet_schedule(&hif_ce_state->tasklets[ce_work->id]);
}

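/*
 * One deferred work item per copy engine; reschedule_ce_tasklet_work_handler()
 * uses these to re-kick a CE tasklet from process context.
 */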
static struct tasklet_work tasklet_workers[CE_ID_MAX];

/**
 * init_tasklet_work() - init_tasklet_work
 * @work: struct work_struct
 * @work_handler: work_handler
 *
 * Return: N/A
 */
static void init_tasklet_work(struct work_struct *work,
                              work_func_t work_handler)
{
        INIT_WORK(work, work_handler);
}

/**
 * init_tasklet_worker_by_ceid() - init_tasklet_workers
 * @scn: HIF Context
 * @ce_id: copy engine ID
 *
 * Return: N/A
 */
void init_tasklet_worker_by_ceid(struct hif_opaque_softc *scn, int ce_id)
{
        tasklet_workers[ce_id].id = ce_id;
        tasklet_workers[ce_id].data = scn;
        init_tasklet_work(&tasklet_workers[ce_id].reg_work.work,
                          reschedule_ce_tasklet_work_handler);
}

/**
 * deinit_tasklet_workers() - deinit_tasklet_workers
 * @scn: HIF Context
 *
 * Return: N/A
 */
void deinit_tasklet_workers(struct hif_opaque_softc *scn)
{
        u32 id;

        for (id = 0; id < CE_ID_MAX; id++)
                qdf_cancel_work(&tasklet_workers[id].reg_work);
}

#ifdef CE_TASKLET_DEBUG_ENABLE
/**
 * hif_record_tasklet_exec_entry_ts() - Record ce tasklet execution
 *                                      entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

        hif_ce_state->stats.tasklet_exec_entry_ts[ce_id] =
                                        qdf_get_log_timestamp_usecs();
}

/**
 * hif_record_tasklet_sched_entry_ts() - Record ce tasklet scheduled
 *                                       entry time
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

        hif_ce_state->stats.tasklet_sched_entry_ts[ce_id] =
                                        qdf_get_log_timestamp_usecs();
}

/**
 * hif_ce_latency_stats() - Display ce latency information
 * @hif_ctx: hif_softc struct
 *
 * Return: None
 */
static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
        uint8_t i, j;
        uint32_t index, start_index;
        uint64_t secs, usecs;
        static const char * const buck_str[] = {"0 - 0.5", "0.5 - 1", "1 - 2",
                                                "2 - 5", "5 - 10", " > 10"};
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);
        struct ce_stats *stats = &hif_ce_state->stats;

        hif_err("\tCE TASKLET ARRIVAL AND EXECUTION STATS");
        for (i = 0; i < CE_COUNT_MAX; i++) {
                hif_nofl_err("\n\t\tCE Ring %d Tasklet Execution Bucket", i);
                for (j = 0; j < CE_BUCKET_MAX; j++) {
                        qdf_log_timestamp_to_secs(
                                stats->ce_tasklet_exec_last_update[i][j],
                                &secs, &usecs);
                        hif_nofl_err("\t Bucket %sms :%llu\t last update:% 8lld.%06lld",
                                     buck_str[j],
                                     stats->ce_tasklet_exec_bucket[i][j],
                                     secs, usecs);
                }

                hif_nofl_err("\n\t\tCE Ring %d Tasklet Scheduled Bucket", i);
                for (j = 0; j < CE_BUCKET_MAX; j++) {
                        qdf_log_timestamp_to_secs(
                                stats->ce_tasklet_sched_last_update[i][j],
                                &secs, &usecs);
                        hif_nofl_err("\t Bucket %sms :%llu\t last update :% 8lld.%06lld",
                                     buck_str[j],
                                     stats->ce_tasklet_sched_bucket[i][j],
                                     secs, usecs);
                }

                hif_nofl_err("\n\t\t CE RING %d Last %d time records",
                             i, HIF_REQUESTED_EVENTS);
                index = stats->record_index[i];
                start_index = stats->record_index[i];

                for (j = 0; j < HIF_REQUESTED_EVENTS; j++) {
                        hif_nofl_err("\tExecution time: %lluus Total Scheduled time: %lluus",
                                     stats->tasklet_exec_time_record[i][index],
                                     stats->tasklet_sched_time_record[i][index]);
                        if (index)
                                index = (index - 1) % HIF_REQUESTED_EVENTS;
                        else
                                index = HIF_REQUESTED_EVENTS - 1;
                        if (index == start_index)
                                break;
                }
        }
}

/**
 * ce_tasklet_update_bucket() - update ce execution and scheduled time latency
 *                              in corresponding time buckets
 * @hif_ce_state: HIF CE state
 * @ce_id: ce_id_type
 *
 * Return: N/A
 */
static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
                                     uint8_t ce_id)
{
        uint32_t index;
        uint64_t exec_time, exec_ms;
        uint64_t sched_time, sched_ms;
        uint64_t curr_time = qdf_get_log_timestamp_usecs();
        struct ce_stats *stats = &hif_ce_state->stats;

        exec_time = curr_time - (stats->tasklet_exec_entry_ts[ce_id]);
        sched_time = (stats->tasklet_exec_entry_ts[ce_id]) -
                     (stats->tasklet_sched_entry_ts[ce_id]);

        index = stats->record_index[ce_id];
        index = (index + 1) % HIF_REQUESTED_EVENTS;

        stats->tasklet_exec_time_record[ce_id][index] = exec_time;
        stats->tasklet_sched_time_record[ce_id][index] = sched_time;
        stats->record_index[ce_id] = index;

        exec_ms = qdf_do_div(exec_time, 1000);
        sched_ms = qdf_do_div(sched_time, 1000);

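        /*
         * Bucket boundaries match buck_str in hif_ce_latency_stats():
         * 0-0.5 ms, 0.5-1 ms, 1-2 ms, 2-5 ms, 5-10 ms and beyond 10 ms.
         * The sub-millisecond split is done on the raw microsecond value
         * (exec_time/sched_time > 500 us).
         */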
        if (exec_ms > 10) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_BEYOND]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_BEYOND]
                                                                = curr_time;
        } else if (exec_ms > 5) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_10_MS]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_10_MS]
                                                                = curr_time;
        } else if (exec_ms > 2) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_5_MS]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_5_MS]
                                                                = curr_time;
        } else if (exec_ms > 1) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_2_MS]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_2_MS]
                                                                = curr_time;
        } else if (exec_time > 500) {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_1_MS]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_1_MS]
                                                                = curr_time;
        } else {
                stats->ce_tasklet_exec_bucket[ce_id][CE_BUCKET_500_US]++;
                stats->ce_tasklet_exec_last_update[ce_id][CE_BUCKET_500_US]
                                                                = curr_time;
        }

        if (sched_ms > 10) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_BEYOND]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_BEYOND]
                                                                = curr_time;
        } else if (sched_ms > 5) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_10_MS]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_10_MS]
                                                                = curr_time;
        } else if (sched_ms > 2) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_5_MS]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_5_MS]
                                                                = curr_time;
        } else if (sched_ms > 1) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_2_MS]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_2_MS]
                                                                = curr_time;
        } else if (sched_time > 500) {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_1_MS]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_1_MS]
                                                                = curr_time;
        } else {
                stats->ce_tasklet_sched_bucket[ce_id][CE_BUCKET_500_US]++;
                stats->ce_tasklet_sched_last_update[ce_id][CE_BUCKET_500_US]
                                                                = curr_time;
        }
}
#else
static inline void
hif_record_tasklet_exec_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void ce_tasklet_update_bucket(struct HIF_CE_state *hif_ce_state,
                                     uint8_t ce_id)
{
}

static inline void
hif_record_tasklet_sched_entry_ts(struct hif_softc *scn, uint8_t ce_id)
{
}

static void
hif_ce_latency_stats(struct hif_softc *hif_ctx)
{
}
#endif /* CE_TASKLET_DEBUG_ENABLE */

#if defined(CE_TASKLET_DEBUG_ENABLE) && defined(CE_TASKLET_SCHEDULE_ON_FULL)
/**
 * hif_reset_ce_full_count() - Reset ce full count
 * @scn: hif_softc
 * @ce_id: ce_id
 *
 * Return: None
 */
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

        hif_ce_state->stats.ce_ring_full_count[ce_id] = 0;
}
#else
static inline void
hif_reset_ce_full_count(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

#ifdef CUSTOM_CB_SCHEDULER_SUPPORT
/**
 * ce_get_custom_cb_pending() - Helper API to check whether the custom
 *                              callback is pending
 * @CE_state: Pointer to CE state
 *
 * Return: bool
 */
static bool
ce_get_custom_cb_pending(struct CE_state *CE_state)
{
        return (qdf_atomic_dec_if_positive(&CE_state->custom_cb_pending) >= 0);
}

/**
 * ce_execute_custom_cb() - Helper API to execute custom callback
 * @CE_state: Pointer to CE state
 *
 * Return: void
 */
static void
ce_execute_custom_cb(struct CE_state *CE_state)
{
        while (ce_get_custom_cb_pending(CE_state) && CE_state->custom_cb &&
               CE_state->custom_cb_context)
                CE_state->custom_cb(CE_state->custom_cb_context);
}
#else
/**
 * ce_execute_custom_cb() - Helper API to execute custom callback
 * @CE_state: Pointer to CE state
 *
 * Return: void
 */
static void
ce_execute_custom_cb(struct CE_state *CE_state)
{
}
#endif /* CUSTOM_CB_SCHEDULER_SUPPORT */

/**
 * ce_tasklet() - CE tasklet bottom-half handler
 * @data: tasklet context, the ce_tasklet_entry cast to unsigned long
 *
 * Return: N/A
 */
static void ce_tasklet(unsigned long data)
{
        struct ce_tasklet_entry *tasklet_entry =
                (struct ce_tasklet_entry *)data;
        struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
        struct CE_state *CE_state = scn->ce_id_to_state[tasklet_entry->ce_id];

        hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
                                 HIF_CE_TASKLET_ENTRY, NULL, NULL, -1, 0);

        if (scn->ce_latency_stats)
                hif_record_tasklet_exec_entry_ts(scn, tasklet_entry->ce_id);

        hif_tasklet_latency_record_exec(scn, tasklet_entry->ce_id);

        if (qdf_atomic_read(&scn->link_suspended)) {
                hif_err("ce %d tasklet fired after link suspend",
                        tasklet_entry->ce_id);
                QDF_BUG(0);
        }

        ce_execute_custom_cb(CE_state);

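        /* Service completed send/receive descriptors on this copy engine. */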
        ce_per_engine_service(scn, tasklet_entry->ce_id);

        if (ce_check_rx_pending(CE_state) && tasklet_entry->inited) {
                /*
                 * There are frames pending, schedule tasklet to process them.
                 * Enable the interrupt only when there are no frames pending
                 * in any of the Copy Engine pipes.
                 */
                if (test_bit(TASKLET_STATE_SCHED,
                             &tasklet_entry->intr_tq.state)) {
                        hif_info("ce_id%d tasklet was scheduled, return",
                                 tasklet_entry->ce_id);
                        qdf_atomic_dec(&scn->active_tasklet_cnt);
                        return;
                }

                hif_record_ce_desc_event(scn, tasklet_entry->ce_id,
                                         HIF_CE_TASKLET_RESCHEDULE,
                                         NULL, NULL, -1, 0);

                ce_tasklet_schedule(tasklet_entry);
                hif_tasklet_latency_record_sched(scn, tasklet_entry->ce_id);

                hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
                if (scn->ce_latency_stats) {
                        ce_tasklet_update_bucket(hif_ce_state,
                                                 tasklet_entry->ce_id);
                        hif_record_tasklet_sched_entry_ts(scn,
                                                          tasklet_entry->ce_id);
                }
                return;
        }

        hif_record_ce_desc_event(scn, tasklet_entry->ce_id, HIF_CE_TASKLET_EXIT,
                                 NULL, NULL, -1, 0);

        if (scn->ce_latency_stats)
                ce_tasklet_update_bucket(hif_ce_state, tasklet_entry->ce_id);

        if ((scn->target_status != TARGET_STATUS_RESET) &&
            !scn->free_irq_done)
                hif_irq_enable(scn, tasklet_entry->ce_id);

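        /* Balance the increment done in ce_dispatch_interrupt(). */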
        qdf_atomic_dec(&scn->active_tasklet_cnt);
}

/**
 * ce_tasklet_init() - ce_tasklet_init
 * @hif_ce_state: hif_ce_state
 * @mask: bitmap of copy engine ids whose tasklets should be initialized
 *
 * Return: N/A
 */
void ce_tasklet_init(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
        int i;
        struct CE_attr *attr;

        for (i = 0; i < CE_COUNT_MAX; i++) {
                if (mask & (1 << i)) {
                        hif_ce_state->tasklets[i].ce_id = i;
                        hif_ce_state->tasklets[i].inited = true;
                        hif_ce_state->tasklets[i].hif_ce_state = hif_ce_state;

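                        /*
                         * CEs flagged with CE_ATTR_HI_TASKLET are scheduled
                         * via tasklet_hi_schedule() (see ce_tasklet_schedule())
                         * so they run ahead of normal-priority tasklets.
                         */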
                        attr = &hif_ce_state->host_ce_config[i];
                        if (attr->flags & CE_ATTR_HI_TASKLET)
                                hif_ce_state->tasklets[i].hi_tasklet_ce = true;
                        else
                                hif_ce_state->tasklets[i].hi_tasklet_ce = false;

                        tasklet_init(&hif_ce_state->tasklets[i].intr_tq,
                                     ce_tasklet,
                                     (unsigned long)&hif_ce_state->tasklets[i]);
                }
        }
}

/**
 * ce_tasklet_kill() - ce_tasklet_kill
 * @scn: HIF context
 *
 * Context: Non-Atomic context
 * Return: N/A
 */
void ce_tasklet_kill(struct hif_softc *scn)
{
        int i;
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(scn);

        for (i = 0; i < CE_COUNT_MAX; i++) {
                if (hif_ce_state->tasklets[i].inited) {
                        hif_ce_state->tasklets[i].inited = false;
                        /*
                         * Cancel the tasklet work before tasklet_disable
                         * to avoid race between tasklet_schedule and
                         * tasklet_kill. Here cancel_work_sync() won't
                         * return before reschedule_ce_tasklet_work_handler()
                         * completes. Even if tasklet_schedule() happens
                         * tasklet_disable() will take care of that.
                         */
                        qdf_cancel_work(&tasklet_workers[i].reg_work);
                        tasklet_kill(&hif_ce_state->tasklets[i].intr_tq);
                }
        }
        qdf_atomic_set(&scn->active_tasklet_cnt, 0);
}

/**
 * ce_tasklet_entry_dump() - dump tasklet entries info
 * @hif_ce_state: ce state
 *
 * This function will dump all tasklet entries info
 *
 * Return: None
 */
static void ce_tasklet_entry_dump(struct HIF_CE_state *hif_ce_state)
{
        struct ce_tasklet_entry *tasklet_entry;
        int i;

        if (hif_ce_state) {
                for (i = 0; i < CE_COUNT_MAX; i++) {
                        tasklet_entry = &hif_ce_state->tasklets[i];

                        hif_info("%02d: ce_id=%d, inited=%d, hi_tasklet_ce=%d hif_ce_state=%pK",
                                 i,
                                 tasklet_entry->ce_id,
                                 tasklet_entry->inited,
                                 tasklet_entry->hi_tasklet_ce,
                                 tasklet_entry->hif_ce_state);
                }
        }
}

#define HIF_CE_DRAIN_WAIT_CNT 20
/**
 * hif_drain_tasklets() - wait until no tasklet is pending
 * @scn: hif context
 *
 * Let running tasklets clear pending traffic.
 *
 * Return: 0 if no bottom half is in progress when it returns.
 *         -EFAULT if it times out.
 */
int hif_drain_tasklets(struct hif_softc *scn)
{
        uint32_t ce_drain_wait_cnt = 0;
        int32_t tasklet_cnt;

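        /*
         * Poll active_tasklet_cnt, sleeping 10 ms per iteration; give up
         * after HIF_CE_DRAIN_WAIT_CNT iterations (roughly 200 ms).
         */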
        while ((tasklet_cnt = qdf_atomic_read(&scn->active_tasklet_cnt))) {
                if (++ce_drain_wait_cnt > HIF_CE_DRAIN_WAIT_CNT) {
                        hif_err("CE still not done with access: %d",
                                tasklet_cnt);

                        return -EFAULT;
                }
                hif_info("Waiting for CE to finish access");
                msleep(10);
        }
        return 0;
}

#ifdef WLAN_SUSPEND_RESUME_TEST
/**
 * hif_interrupt_is_ut_resume() - Tests if an irq on the given copy engine
 *                                should trigger a unit-test resume.
 * @scn: The HIF context to operate on
 * @ce_id: The copy engine Id from the originating interrupt
 *
 * Return: true if the raised irq should trigger a unit-test resume
 */
static bool hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
        int errno;
        uint8_t wake_ce_id;

        if (!hif_is_ut_suspended(scn))
                return false;

        /* ensure passed ce_id matches wake ce_id */
        errno = hif_get_wake_ce_id(scn, &wake_ce_id);
        if (errno) {
                hif_err("Failed to get wake CE Id: %d", errno);
                return false;
        }

        return ce_id == wake_ce_id;
}
#else
static inline bool
hif_interrupt_is_ut_resume(struct hif_softc *scn, int ce_id)
{
        return false;
}
#endif /* WLAN_SUSPEND_RESUME_TEST */

/**
 * hif_snoc_interrupt_handler() - hif_snoc_interrupt_handler
 * @irq: irq coming from kernel
 * @context: context
 *
 * Return: N/A
 */
static irqreturn_t hif_snoc_interrupt_handler(int irq, void *context)
{
        struct ce_tasklet_entry *tasklet_entry = context;
        struct hif_softc *scn = HIF_GET_SOFTC(tasklet_entry->hif_ce_state);

        return ce_dispatch_interrupt(pld_get_ce_id(scn->qdf_dev->dev, irq),
                                     tasklet_entry);
}

/**
 * hif_ce_increment_interrupt_count() - update ce stats
 * @hif_ce_state: ce state
 * @ce_id: ce id
 *
 * Return: none
 */
static inline void
hif_ce_increment_interrupt_count(struct HIF_CE_state *hif_ce_state, int ce_id)
{
        int cpu_id = qdf_get_cpu();

        hif_ce_state->stats.ce_per_cpu[ce_id][cpu_id]++;
}

/**
 * hif_display_ce_stats() - display ce stats
 * @hif_ctx: HIF context
 *
 * Return: none
 */
void hif_display_ce_stats(struct hif_softc *hif_ctx)
{
#define STR_SIZE 128
        uint8_t i, j, pos;
        char str_buffer[STR_SIZE];
        int size, ret;
        struct HIF_CE_state *hif_ce_state = HIF_GET_CE_STATE(hif_ctx);

        qdf_debug("CE interrupt statistics:");
        for (i = 0; i < CE_COUNT_MAX; i++) {
                size = STR_SIZE;
                pos = 0;
                for (j = 0; j < QDF_MAX_AVAILABLE_CPU; j++) {
                        ret = snprintf(str_buffer + pos, size, "[%d]:%d ",
                                       j, hif_ce_state->stats.ce_per_cpu[i][j]);
                        if (ret <= 0 || ret >= size)
                                break;
                        size -= ret;
                        pos += ret;
                }
                qdf_debug("CE id[%2d] - %s", i, str_buffer);
        }

        if (hif_ctx->ce_latency_stats)
                hif_ce_latency_stats(hif_ctx);
#undef STR_SIZE
}

/**
 * hif_clear_ce_stats() - clear ce stats
 * @hif_ce_state: ce state
 *
 * Return: none
 */
void hif_clear_ce_stats(struct HIF_CE_state *hif_ce_state)
{
        qdf_mem_zero(&hif_ce_state->stats, sizeof(struct ce_stats));
}

#ifdef WLAN_TRACEPOINTS
/**
 * hif_set_ce_tasklet_sched_time() - Set tasklet schedule time for
 *                                   CE with matching ce_id
 * @scn: hif context
 * @ce_id: CE id
 *
 * Return: None
 */
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
        struct CE_state *ce_state = scn->ce_id_to_state[ce_id];

        ce_state->ce_tasklet_sched_time = qdf_time_sched_clock();
}
#else
static inline
void hif_set_ce_tasklet_sched_time(struct hif_softc *scn, uint8_t ce_id)
{
}
#endif

/**
 * hif_tasklet_schedule() - schedule tasklet
 * @hif_ctx: hif context
 * @tasklet_entry: ce tasklet entry
 *
 * Return: false if tasklet already scheduled, otherwise true
 */
static inline bool hif_tasklet_schedule(struct hif_opaque_softc *hif_ctx,
                                        struct ce_tasklet_entry *tasklet_entry)
{
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

        if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state)) {
                hif_debug("tasklet scheduled, return");
                qdf_atomic_dec(&scn->active_tasklet_cnt);
                return false;
        }

        hif_set_ce_tasklet_sched_time(scn, tasklet_entry->ce_id);
        /* Record the schedule latency before tasklet_schedule() to keep
         * whunt happy: under whunt the tasklet may run before
         * hif_tasklet_schedule() has finished.
         */
        hif_tasklet_latency_record_sched(scn, tasklet_entry->ce_id);
        ce_tasklet_schedule(tasklet_entry);

        hif_reset_ce_full_count(scn, tasklet_entry->ce_id);
        if (scn->ce_latency_stats)
                hif_record_tasklet_sched_entry_ts(scn, tasklet_entry->ce_id);

        return true;
}

#ifdef WLAN_FEATURE_WMI_DIAG_OVER_CE7
#define CE_LOOP_MAX_COUNT 20
/**
 * ce_poll_reap_by_id() - reap the available frames from CE by polling per ce_id
 * @scn: hif context
 * @ce_id: CE id
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success, unlikely -EBUSY if reaping goes infinite loop
 */
static int ce_poll_reap_by_id(struct hif_softc *scn, enum ce_id_type ce_id)
{
        struct HIF_CE_state *hif_ce_state = (struct HIF_CE_state *)scn;
        struct CE_state *CE_state = scn->ce_id_to_state[ce_id];
        int i;

        if (scn->ce_latency_stats)
                hif_record_tasklet_exec_entry_ts(scn, ce_id);

        hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_ENTRY,
                                 NULL, NULL, -1, 0);

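        /*
         * Service the CE until no rx entries remain, bounded by
         * CE_LOOP_MAX_COUNT iterations so a misbehaving ring cannot
         * hold up bus suspend indefinitely.
         */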
        for (i = 0; i < CE_LOOP_MAX_COUNT; i++) {
                ce_per_engine_service(scn, ce_id);

                if (ce_check_rx_pending(CE_state))
                        hif_record_ce_desc_event(scn, ce_id,
                                                 HIF_CE_TASKLET_REAP_REPOLL,
                                                 NULL, NULL, -1, 0);
                else
                        break;
        }

        /*
         * In the unlikely case that frames are still pending to reap,
         * this could loop forever, so return -EBUSY.
         */
        if (ce_check_rx_pending(CE_state) &&
            i == CE_LOOP_MAX_COUNT)
                return -EBUSY;

        hif_record_ce_desc_event(scn, ce_id, HIF_CE_REAP_EXIT,
                                 NULL, NULL, -1, 0);

        if (scn->ce_latency_stats)
                ce_tasklet_update_bucket(hif_ce_state, ce_id);

        return 0;
}

/**
 * hif_drain_fw_diag_ce() - reap all the available FW diag logs from CE
 * @scn: hif context
 *
 * This function needs to be called once after all the irqs are disabled
 * and tasklets are drained during bus suspend.
 *
 * Return: 0 on success, unlikely -EBUSY if reaping goes infinite loop
 */
int hif_drain_fw_diag_ce(struct hif_softc *scn)
{
        uint8_t ce_id;
        struct HIF_CE_state *hif_ce_state = (struct HIF_CE_state *)scn;
        struct ce_tasklet_entry *tasklet_entry;

        if (hif_get_fw_diag_ce_id(scn, &ce_id))
                return 0;

        tasklet_entry = &hif_ce_state->tasklets[ce_id];

        /* If the CE7 tasklet is already triggered, there is no need to poll
         * the CE explicitly; the CE7 SIRQ can reschedule itself until there
         * are no pending entries.
         */
        if (test_bit(TASKLET_STATE_SCHED, &tasklet_entry->intr_tq.state) ||
            test_bit(TASKLET_STATE_RUN, &tasklet_entry->intr_tq.state))
                return -EBUSY;

        return ce_poll_reap_by_id(scn, ce_id);
}
#endif

#ifdef CE_TASKLET_SCHEDULE_ON_FULL
static inline int ce_check_tasklet_status(int ce_id,
                                          struct ce_tasklet_entry *entry)
{
        struct HIF_CE_state *hif_ce_state = entry->hif_ce_state;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
        struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

        if (hif_napi_enabled(hif_hdl, ce_id)) {
                struct qca_napi_info *napi;

                napi = scn->napi_data.napis[ce_id];
                if (test_bit(NAPI_STATE_SCHED, &napi->napi.state))
                        return -EBUSY;
        } else {
                if (test_bit(TASKLET_STATE_SCHED,
                             &hif_ce_state->tasklets[ce_id].intr_tq.state))
                        return -EBUSY;
        }
        return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
        qdf_spin_lock_irqsave(&ce_state->ce_interrupt_lock);
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
        qdf_spin_unlock_irqrestore(&ce_state->ce_interrupt_lock);
}
#else
static inline int ce_check_tasklet_status(int ce_id,
                                          struct ce_tasklet_entry *entry)
{
        return 0;
}

static inline void ce_interrupt_lock(struct CE_state *ce_state)
{
}

static inline void ce_interrupt_unlock(struct CE_state *ce_state)
{
}
#endif

/**
 * ce_dispatch_interrupt() - dispatch an interrupt to a processing context
 * @ce_id: ce_id
 * @tasklet_entry: context
 *
 * Return: IRQ_HANDLED if the interrupt was consumed, IRQ_NONE otherwise
 */
irqreturn_t ce_dispatch_interrupt(int ce_id,
                                  struct ce_tasklet_entry *tasklet_entry)
{
        struct HIF_CE_state *hif_ce_state = tasklet_entry->hif_ce_state;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);
        struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
        struct CE_state *ce_state;

        if (tasklet_entry->ce_id != ce_id) {
                bool rl;

                rl = hif_err_rl("ce_id (expect %d, received %d) does not match, inited=%d, ce_count=%u",
                                tasklet_entry->ce_id, ce_id,
                                tasklet_entry->inited,
                                scn->ce_count);

                if (!rl)
                        ce_tasklet_entry_dump(hif_ce_state);

                return IRQ_NONE;
        }
        if (unlikely(ce_id >= CE_COUNT_MAX)) {
                hif_err("ce_id=%d > CE_COUNT_MAX=%d",
                        tasklet_entry->ce_id, CE_COUNT_MAX);
                return IRQ_NONE;
        }

        ce_state = scn->ce_id_to_state[ce_id];

        ce_interrupt_lock(ce_state);
        if (ce_check_tasklet_status(ce_id, tasklet_entry)) {
                ce_interrupt_unlock(ce_state);
                return IRQ_NONE;
        }

        if (!TARGET_REGISTER_ACCESS_ALLOWED(scn)) {
                ce_interrupt_unlock(ce_state);
                return IRQ_HANDLED;
        }

        hif_irq_disable(scn, ce_id);

        hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT,
                                 NULL, NULL, 0, 0);
        hif_ce_increment_interrupt_count(hif_ce_state, ce_id);

        if (unlikely(hif_interrupt_is_ut_resume(scn, ce_id))) {
                hif_ut_fw_resume(scn);
                hif_irq_enable(scn, ce_id);
                ce_interrupt_unlock(ce_state);
                return IRQ_HANDLED;
        }

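        /*
         * Mark a bottom half in flight; the scheduled context drops the
         * count when it completes (see hif_drain_tasklets()).
         */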
        qdf_atomic_inc(&scn->active_tasklet_cnt);

        if (hif_napi_enabled(hif_hdl, ce_id))
                hif_napi_schedule(hif_hdl, ce_id);
        else
                hif_tasklet_schedule(hif_hdl, tasklet_entry);

        ce_interrupt_unlock(ce_state);

        return IRQ_HANDLED;
}

const char *ce_name[CE_COUNT_MAX] = {
        "WLAN_CE_0",
        "WLAN_CE_1",
        "WLAN_CE_2",
        "WLAN_CE_3",
        "WLAN_CE_4",
        "WLAN_CE_5",
        "WLAN_CE_6",
        "WLAN_CE_7",
        "WLAN_CE_8",
        "WLAN_CE_9",
        "WLAN_CE_10",
        "WLAN_CE_11",
#ifdef QCA_WIFI_QCN9224
        "WLAN_CE_12",
        "WLAN_CE_13",
        "WLAN_CE_14",
        "WLAN_CE_15",
#endif
};

/**
 * ce_unregister_irq() - ce_unregister_irq
 * @hif_ce_state: hif_ce_state copy engine device handle
 * @mask: which copy engines to unregister for.
 *
 * Unregisters copy engine irqs matching mask. If a 1 is set at bit x,
 * unregister for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_unregister_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
        int id;
        int ce_count;
        int ret;
        struct hif_softc *scn;

        if (!hif_ce_state) {
                hif_warn("hif_ce_state = NULL");
                return QDF_STATUS_SUCCESS;
        }

        scn = HIF_GET_SOFTC(hif_ce_state);
        ce_count = scn->ce_count;
        /* we are removing interrupts, so better stop NAPI */
        ret = hif_napi_event(GET_HIF_OPAQUE_HDL(scn),
                             NAPI_EVT_INT_STATE, (void *)0);
        if (ret != 0)
                hif_err("napi_event INT_STATE returned %d", ret);
        /* this is not fatal, continue */

        /* filter mask to free only for ce's with irq registered */
        mask &= hif_ce_state->ce_register_irq_done;
        for (id = 0; id < ce_count; id++) {
                if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
                        ret = pld_ce_free_irq(scn->qdf_dev->dev, id,
                                              &hif_ce_state->tasklets[id]);
                        if (ret < 0)
                                hif_err(
                                        "pld_unregister_irq error - ce_id = %d, ret = %d",
                                        id, ret);
                }
                ce_disable_polling(scn->ce_id_to_state[id]);
        }
        hif_ce_state->ce_register_irq_done &= ~mask;

        return QDF_STATUS_SUCCESS;
}

/**
 * ce_register_irq() - ce_register_irq
 * @hif_ce_state: hif_ce_state
 * @mask: which copy engines to register for.
 *
 * Registers copy engine irqs matching mask. If a 1 is set at bit x,
 * register for copy engine x.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS ce_register_irq(struct HIF_CE_state *hif_ce_state, uint32_t mask)
{
        int id;
        int ce_count;
        int ret;
        unsigned long irqflags = IRQF_TRIGGER_RISING;
        uint32_t done_mask = 0;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ce_state);

        ce_count = scn->ce_count;

        for (id = 0; id < ce_count; id++) {
                if ((mask & (1 << id)) && hif_ce_state->tasklets[id].inited) {
                        ret = pld_ce_request_irq(scn->qdf_dev->dev, id,
                                                 hif_snoc_interrupt_handler,
                                                 irqflags, ce_name[id],
                                                 &hif_ce_state->tasklets[id]);
                        if (ret) {
                                hif_err(
                                        "cannot register CE %d irq handler, ret = %d",
                                        id, ret);
                                ce_unregister_irq(hif_ce_state, done_mask);
                                return QDF_STATUS_E_FAULT;
                        }
                        done_mask |= 1 << id;
                }
        }
        hif_ce_state->ce_register_irq_done |= done_mask;

        return QDF_STATUS_SUCCESS;
}