1 /*
2 * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 *
22 * Permission to use, copy, modify, and/or distribute this software for any
23 * purpose with or without fee is hereby granted, provided that the above
24 * copyright notice and this permission notice appear in all copies.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
27 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
29 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
30 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
31 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
32 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
33 */
34
35 #ifndef REMOVE_PKT_LOG
36
37 #include "qdf_mem.h"
38 #include "athdefs.h"
39 #include "pktlog_ac_i.h"
40 #include "cds_api.h"
41 #include "wma_types.h"
42 #include "htc.h"
43 #include <cdp_txrx_cmn_struct.h>
44 #include <cdp_txrx_ctrl.h>
45 #ifdef PKTLOG_LEGACY
46 #include "pktlog_wifi2.h"
47 #else
48 #include "pktlog_wifi3.h"
49 #endif /* PKTLOG_LEGACY */
50
/*
 * WDI event subscriber objects for each packet-log event class.
 * Their .callback members are wired up in pktlog_callback_registration()
 * and they are registered/unregistered with the datapath through
 * cdp_wdi_event_sub()/cdp_wdi_event_unsub() in wdi_pktlog_subscribe()/
 * wdi_pktlog_unsubscribe().
 */
wdi_event_subscribe PKTLOG_TX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RX_REMOTE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCFIND_SUBSCRIBER;
wdi_event_subscribe PKTLOG_RCUPDATE_SUBSCRIBER;
wdi_event_subscribe PKTLOG_SW_EVENT_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_T2H_SUBSCRIBER;
wdi_event_subscribe PKTLOG_LITE_RX_SUBSCRIBER;
wdi_event_subscribe PKTLOG_OFFLOAD_SUBSCRIBER;
#ifdef BE_PKTLOG_SUPPORT
wdi_event_subscribe PKTLOG_HYBRID_SUBSCRIBER;
#endif
63
/*
 * Architecture-dependent pktlog entry points, exported to the rest of the
 * driver through pl_dev.pl_funcs.
 */
struct ol_pl_arch_dep_funcs ol_pl_funcs = {
	.pktlog_init = pktlog_init,
	.pktlog_enable = pktlog_enable,
	.pktlog_setsize = pktlog_setsize,
	.pktlog_disable = pktlog_disable,       /* valid for f/w disable */
};
70
/* Module-wide pktlog device instance; handed out via pktlog_sethandle(). */
struct pktlog_dev_t pl_dev = {
	.pl_funcs = &ol_pl_funcs,
};
74
pktlog_sethandle(struct pktlog_dev_t ** pl_handle,struct hif_opaque_softc * scn)75 void pktlog_sethandle(struct pktlog_dev_t **pl_handle,
76 struct hif_opaque_softc *scn)
77 {
78 pl_dev.scn = (ol_ath_generic_softc_handle) scn;
79 *pl_handle = &pl_dev;
80 }
81
/**
 * pktlog_set_pdev_id() - Record the datapath pdev id on the pktlog device
 * @pl_dev: pktlog device handle (must be non-NULL; not validated here)
 * @pdev_id: datapath physical device id to remember
 */
void pktlog_set_pdev_id(struct pktlog_dev_t *pl_dev, uint8_t pdev_id)
{
	pl_dev->pdev_id = pdev_id;
}
86
pktlog_set_callback_regtype(enum pktlog_callback_regtype callback_type)87 void pktlog_set_callback_regtype(
88 enum pktlog_callback_regtype callback_type)
89 {
90 struct pktlog_dev_t *pl_dev = get_pktlog_handle();
91
92 if (!pl_dev) {
93 qdf_print("Invalid pl_dev");
94 return;
95 }
96
97 pl_dev->callback_type = callback_type;
98 }
99
get_pktlog_handle(void)100 struct pktlog_dev_t *get_pktlog_handle(void)
101 {
102 uint8_t pdev_id = WMI_PDEV_ID_SOC;
103 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
104
105 return cdp_get_pldev(soc, pdev_id);
106 }
107
pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,WMI_CMD_ID cmd_id,bool ini_triggered,uint8_t user_triggered)108 static A_STATUS pktlog_wma_post_msg(WMI_PKTLOG_EVENT event_types,
109 WMI_CMD_ID cmd_id, bool ini_triggered,
110 uint8_t user_triggered)
111 {
112 struct scheduler_msg msg = { 0 };
113 QDF_STATUS status;
114 struct ath_pktlog_wmi_params *param;
115
116 param = qdf_mem_malloc(sizeof(struct ath_pktlog_wmi_params));
117
118 if (!param)
119 return A_NO_MEMORY;
120
121 param->cmd_id = cmd_id;
122 param->pktlog_event = event_types;
123 param->ini_triggered = ini_triggered;
124 param->user_triggered = user_triggered;
125
126 msg.type = WMA_PKTLOG_ENABLE_REQ;
127 msg.bodyptr = param;
128 msg.bodyval = 0;
129
130 status = scheduler_post_message(QDF_MODULE_ID_WMA,
131 QDF_MODULE_ID_WMA,
132 QDF_MODULE_ID_WMA, &msg);
133
134 if (status != QDF_STATUS_SUCCESS) {
135 qdf_mem_free(param);
136 return A_ERROR;
137 }
138
139 return A_OK;
140 }
141
142 static inline A_STATUS
pktlog_enable_tgt(struct hif_opaque_softc * _scn,uint32_t log_state,bool ini_triggered,uint8_t user_triggered)143 pktlog_enable_tgt(struct hif_opaque_softc *_scn, uint32_t log_state,
144 bool ini_triggered, uint8_t user_triggered)
145 {
146 uint32_t types = 0;
147
148 if (log_state & ATH_PKTLOG_TX)
149 types |= WMI_PKTLOG_EVENT_TX;
150
151 if (log_state & ATH_PKTLOG_RX)
152 types |= WMI_PKTLOG_EVENT_RX;
153
154 if (log_state & ATH_PKTLOG_RCFIND)
155 types |= WMI_PKTLOG_EVENT_RCF;
156
157 if (log_state & ATH_PKTLOG_RCUPDATE)
158 types |= WMI_PKTLOG_EVENT_RCU;
159
160 if (log_state & ATH_PKTLOG_SW_EVENT)
161 types |= WMI_PKTLOG_EVENT_SW;
162
163 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
164 "%s: Pktlog events: %d", __func__, types);
165
166 return pktlog_wma_post_msg(types, WMI_PDEV_PKTLOG_ENABLE_CMDID,
167 ini_triggered, user_triggered);
168 }
169
170 #ifdef PKTLOG_LEGACY
171 /**
172 * wdi_pktlog_subscribe() - Subscribe pktlog callbacks
173 * @pdev_id: pdev id
174 * @log_state: Pktlog registration
175 *
176 * Return: zero on success, non-zero on failure
177 */
178 static inline A_STATUS
wdi_pktlog_subscribe(uint8_t pdev_id,int32_t log_state)179 wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state)
180 {
181 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
182
183 if (pdev_id < 0) {
184 qdf_print("Invalid pdev");
185 return A_ERROR;
186 }
187
188 if (log_state & ATH_PKTLOG_TX) {
189 if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_TX_SUBSCRIBER,
190 WDI_EVENT_TX_STATUS)) {
191 return A_ERROR;
192 }
193 }
194 if (log_state & ATH_PKTLOG_RX) {
195 if (cdp_wdi_event_sub(soc, pdev_id, &PKTLOG_RX_SUBSCRIBER,
196 WDI_EVENT_RX_DESC)) {
197 return A_ERROR;
198 }
199 if (cdp_wdi_event_sub(soc, pdev_id,
200 &PKTLOG_RX_REMOTE_SUBSCRIBER,
201 WDI_EVENT_RX_DESC_REMOTE)) {
202 return A_ERROR;
203 }
204 }
205 if (log_state & ATH_PKTLOG_RCFIND) {
206 if (cdp_wdi_event_sub(soc, pdev_id,
207 &PKTLOG_RCFIND_SUBSCRIBER,
208 WDI_EVENT_RATE_FIND)) {
209 return A_ERROR;
210 }
211 }
212 if (log_state & ATH_PKTLOG_RCUPDATE) {
213 if (cdp_wdi_event_sub(soc, pdev_id,
214 &PKTLOG_RCUPDATE_SUBSCRIBER,
215 WDI_EVENT_RATE_UPDATE)) {
216 return A_ERROR;
217 }
218 }
219 if (log_state & ATH_PKTLOG_SW_EVENT) {
220 if (cdp_wdi_event_sub(soc, pdev_id,
221 &PKTLOG_SW_EVENT_SUBSCRIBER,
222 WDI_EVENT_SW_EVENT)) {
223 return A_ERROR;
224 }
225 }
226
227 return A_OK;
228 }
229 #else
230 static inline A_STATUS
wdi_pktlog_subscribe(uint8_t pdev_id,int32_t log_state)231 wdi_pktlog_subscribe(uint8_t pdev_id, int32_t log_state)
232 {
233 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
234
235 if (pdev_id < 0) {
236 qdf_print("Invalid pdev");
237 return A_ERROR;
238 }
239
240 if ((log_state & ATH_PKTLOG_TX) ||
241 (log_state & ATH_PKTLOG_RCFIND) ||
242 (log_state & ATH_PKTLOG_RCUPDATE) ||
243 (log_state & ATH_PKTLOG_SW_EVENT)) {
244 if (cdp_wdi_event_sub(soc,
245 pdev_id,
246 &PKTLOG_OFFLOAD_SUBSCRIBER,
247 WDI_EVENT_OFFLOAD_ALL)) {
248 return A_ERROR;
249 }
250 }
251
252 if (log_state & ATH_PKTLOG_RX) {
253 if (cdp_wdi_event_sub(soc, pdev_id,
254 &PKTLOG_RX_SUBSCRIBER,
255 WDI_EVENT_RX_DESC)) {
256 return A_ERROR;
257 }
258 }
259
260 if (log_state & ATH_PKTLOG_SW_EVENT) {
261 if (cdp_wdi_event_sub(soc, pdev_id,
262 &PKTLOG_SW_EVENT_SUBSCRIBER,
263 WDI_EVENT_SW_EVENT)) {
264 return A_ERROR;
265 }
266 }
267
268 if (log_state & ATH_PKTLOG_LITE_T2H) {
269 if (cdp_wdi_event_sub(soc, pdev_id,
270 &PKTLOG_LITE_T2H_SUBSCRIBER,
271 WDI_EVENT_LITE_T2H)) {
272 return A_ERROR;
273 }
274 }
275
276 if (log_state & ATH_PKTLOG_LITE_RX) {
277 if (cdp_wdi_event_sub(soc, pdev_id,
278 &PKTLOG_LITE_RX_SUBSCRIBER,
279 WDI_EVENT_LITE_RX)) {
280 return A_ERROR;
281 }
282 }
283
284 return A_OK;
285 }
286 #endif
287
/**
 * pktlog_callback() - WDI dispatch for full pktlog events
 * @pdev: pktlog/datapath context forwarded to the per-event processor
 * @event: WDI event that fired
 * @log_data: raw event payload
 * @peer_id: originating peer (unused)
 * @status: event status (unused)
 *
 * Routes each WDI event to its process_* handler; a handler failure is
 * logged and the event is dropped.
 */
void pktlog_callback(void *pdev, enum WDI_EVENT event, void *log_data,
		     u_int16_t peer_id, uint32_t status)
{
	switch (event) {
	case WDI_EVENT_OFFLOAD_ALL:
		/* offloaded records from the target */
		if (process_offload_pktlog_wifi3(pdev, log_data))
			qdf_print("Unable to process offload info");
		break;
	case WDI_EVENT_TX_STATUS:
		/* TX completion record */
		if (process_tx_info(pdev, log_data))
			qdf_print("Unable to process TX info");
		break;
	case WDI_EVENT_RX_DESC:
		/* RX descriptor for local frames */
		if (process_rx_info(pdev, log_data))
			qdf_print("Unable to process RX info");
		break;
	case WDI_EVENT_RX_DESC_REMOTE:
		/* RX descriptor for remote frames */
		if (process_rx_info_remote(pdev, log_data))
			qdf_print("Unable to process RX info");
		break;
	case WDI_EVENT_RATE_FIND:
		/* rate-control find record */
		if (process_rate_find(pdev, log_data))
			qdf_print("Unable to process RC_FIND info");
		break;
	case WDI_EVENT_RATE_UPDATE:
		/* rate-control update record */
		if (process_rate_update(pdev, log_data))
			qdf_print("Unable to process RC_UPDATE");
		break;
	case WDI_EVENT_SW_EVENT:
		/* software-generated event record */
		if (process_sw_event(pdev, log_data))
			qdf_print("Unable to process SW_EVENT");
		break;
	default:
		break;
	}
}
370
371 void
lit_pktlog_callback(void * context,enum WDI_EVENT event,void * log_data,u_int16_t peer_id,uint32_t status)372 lit_pktlog_callback(void *context, enum WDI_EVENT event, void *log_data,
373 u_int16_t peer_id, uint32_t status)
374 {
375 switch (event) {
376 case WDI_EVENT_RX_DESC:
377 {
378 if (process_rx_desc_remote_wifi3(context, log_data)) {
379 qdf_print("Unable to process RX info");
380 return;
381 }
382 break;
383 }
384 case WDI_EVENT_LITE_T2H:
385 {
386 if (process_pktlog_lite_wifi3(context, log_data,
387 PKTLOG_TYPE_LITE_T2H)) {
388 qdf_print("Unable to process lite_t2h");
389 return;
390 }
391 break;
392 }
393 case WDI_EVENT_LITE_RX:
394 {
395 if (process_pktlog_lite_wifi3(context, log_data,
396 PKTLOG_TYPE_LITE_RX)) {
397 qdf_print("Unable to process lite_rx");
398 return;
399 }
400 break;
401 }
402 default:
403 break;
404 }
405 }
406
407 #ifdef PKTLOG_LEGACY
408 A_STATUS
wdi_pktlog_unsubscribe(uint8_t pdev_id,uint32_t log_state)409 wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state)
410 {
411 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
412 /* TODO: WIN implementation to get soc */
413
414 if (log_state & ATH_PKTLOG_TX) {
415 if (cdp_wdi_event_unsub(soc, pdev_id,
416 &PKTLOG_TX_SUBSCRIBER,
417 WDI_EVENT_TX_STATUS)) {
418 return A_ERROR;
419 }
420 }
421 if (log_state & ATH_PKTLOG_RX) {
422 if (cdp_wdi_event_unsub(soc, pdev_id,
423 &PKTLOG_RX_SUBSCRIBER,
424 WDI_EVENT_RX_DESC)) {
425 return A_ERROR;
426 }
427 if (cdp_wdi_event_unsub(soc, pdev_id,
428 &PKTLOG_RX_REMOTE_SUBSCRIBER,
429 WDI_EVENT_RX_DESC_REMOTE)) {
430 return A_ERROR;
431 }
432 }
433
434 if (log_state & ATH_PKTLOG_RCFIND) {
435 if (cdp_wdi_event_unsub(soc, pdev_id,
436 &PKTLOG_RCFIND_SUBSCRIBER,
437 WDI_EVENT_RATE_FIND)) {
438 return A_ERROR;
439 }
440 }
441 if (log_state & ATH_PKTLOG_RCUPDATE) {
442 if (cdp_wdi_event_unsub(soc, pdev_id,
443 &PKTLOG_RCUPDATE_SUBSCRIBER,
444 WDI_EVENT_RATE_UPDATE)) {
445 return A_ERROR;
446 }
447 }
448 if (log_state & ATH_PKTLOG_RCUPDATE) {
449 if (cdp_wdi_event_unsub(soc, pdev_id,
450 &PKTLOG_SW_EVENT_SUBSCRIBER,
451 WDI_EVENT_SW_EVENT)) {
452 return A_ERROR;
453 }
454 }
455
456 return A_OK;
457 }
458 #else
459 A_STATUS
wdi_pktlog_unsubscribe(uint8_t pdev_id,uint32_t log_state)460 wdi_pktlog_unsubscribe(uint8_t pdev_id, uint32_t log_state)
461 {
462 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
463
464 if ((log_state & ATH_PKTLOG_TX) ||
465 (log_state & ATH_PKTLOG_RCFIND) ||
466 (log_state & ATH_PKTLOG_RCUPDATE) ||
467 (log_state & ATH_PKTLOG_SW_EVENT)) {
468 if (cdp_wdi_event_unsub(soc,
469 pdev_id,
470 &PKTLOG_OFFLOAD_SUBSCRIBER,
471 WDI_EVENT_OFFLOAD_ALL)) {
472 return A_ERROR;
473 }
474 }
475 if (log_state & ATH_PKTLOG_RX) {
476 if (cdp_wdi_event_unsub(soc, pdev_id,
477 &PKTLOG_RX_SUBSCRIBER,
478 WDI_EVENT_RX_DESC)) {
479 return A_ERROR;
480 }
481 }
482 if (log_state & ATH_PKTLOG_LITE_T2H) {
483 if (cdp_wdi_event_unsub(soc, pdev_id,
484 &PKTLOG_LITE_T2H_SUBSCRIBER,
485 WDI_EVENT_LITE_T2H)) {
486 return A_ERROR;
487 }
488 }
489 if (log_state & ATH_PKTLOG_LITE_RX) {
490 if (cdp_wdi_event_unsub(soc, pdev_id,
491 &PKTLOG_LITE_RX_SUBSCRIBER,
492 WDI_EVENT_LITE_RX)) {
493 return A_ERROR;
494 }
495 }
496
497 return A_OK;
498 }
499 #endif
500
pktlog_disable(struct hif_opaque_softc * scn)501 int pktlog_disable(struct hif_opaque_softc *scn)
502 {
503 struct pktlog_dev_t *pl_dev;
504 struct ath_pktlog_info *pl_info;
505 uint8_t save_pktlog_state;
506 uint8_t pdev_id = WMI_PDEV_ID_SOC;
507
508 pl_dev = get_pktlog_handle();
509
510 if (!pl_dev) {
511 qdf_print("Invalid pl_dev");
512 return -EINVAL;
513 }
514
515 pl_info = pl_dev->pl_info;
516
517 if (!pl_dev->pl_info) {
518 qdf_print("Invalid pl_info");
519 return -EINVAL;
520 }
521
522 if (pdev_id < 0) {
523 qdf_print("Invalid pdev");
524 return -EINVAL;
525 }
526
527 if (pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS ||
528 pl_info->curr_pkt_state ==
529 PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED ||
530 pl_info->curr_pkt_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
531 pl_info->curr_pkt_state ==
532 PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
533 return -EBUSY;
534
535 save_pktlog_state = pl_info->curr_pkt_state;
536 pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
537
538 if (pktlog_wma_post_msg(0, WMI_PDEV_PKTLOG_DISABLE_CMDID, 0, 0)) {
539 pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
540 qdf_print("Failed to disable pktlog in target");
541 return -EINVAL;
542 }
543
544 if (pl_dev->is_pktlog_cb_subscribed &&
545 wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) {
546 pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
547 qdf_print("Cannot unsubscribe pktlog from the WDI");
548 return -EINVAL;
549 }
550 pl_dev->is_pktlog_cb_subscribed = false;
551 if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_START)
552 pl_info->curr_pkt_state =
553 PKTLOG_OPR_IN_PROGRESS_READ_START_PKTLOG_DISABLED;
554 else
555 pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
556 return 0;
557 }
558
559 #ifdef PKTLOG_LEGACY
560 /**
561 * pktlog_callback_registration() - Register pktlog handlers based on
562 * on callback type
563 * @callback_type: pktlog full or lite registration
564 *
565 * Return: None
566 */
static void pktlog_callback_registration(uint8_t callback_type)
{
	/* every legacy subscriber shares the same full-pktlog callback */
	wdi_event_subscribe *subscribers[] = {
		&PKTLOG_TX_SUBSCRIBER,
		&PKTLOG_RX_SUBSCRIBER,
		&PKTLOG_RX_REMOTE_SUBSCRIBER,
		&PKTLOG_RCFIND_SUBSCRIBER,
		&PKTLOG_RCUPDATE_SUBSCRIBER,
		&PKTLOG_SW_EVENT_SUBSCRIBER,
	};
	uint32_t i;

	if (callback_type != PKTLOG_DEFAULT_CALLBACK_REGISTRATION)
		return;

	for (i = 0; i < sizeof(subscribers) / sizeof(subscribers[0]); i++)
		subscribers[i]->callback = pktlog_callback;
}
578 #else
/* Wire up wifi3 subscribers: lite handlers plus the offload dispatcher. */
static void pktlog_callback_registration(uint8_t callback_type)
{
	switch (callback_type) {
	case PKTLOG_DEFAULT_CALLBACK_REGISTRATION:
		PKTLOG_RX_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_OFFLOAD_SUBSCRIBER.callback = pktlog_callback;
		break;
	case PKTLOG_LITE_CALLBACK_REGISTRATION:
		PKTLOG_LITE_T2H_SUBSCRIBER.callback = lit_pktlog_callback;
		PKTLOG_LITE_RX_SUBSCRIBER.callback = lit_pktlog_callback;
		break;
	default:
		break;
	}
}
590 #endif
591
592 #define ONE_MEGABYTE (1024 * 1024)
593
/**
 * pktlog_init() - Reset pktlog bookkeeping to its power-on defaults
 * @scn: HIF context (unused directly; kept for the pl_funcs interface)
 *
 * Zeroes pl_info, (re)initializes its spinlock and mutex, sizes the log
 * buffer from the cfg_dp_pktlog_buffer_size ini (in MB, falling back to
 * 1 MB when unset/zero), and wires up the WDI callbacks for the
 * registration type previously chosen via pktlog_set_callback_regtype().
 */
void pktlog_init(struct hif_opaque_softc *scn)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info *pl_info;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	uint32_t buff_size;

	if (!pl_dev || !pl_dev->pl_info) {
		qdf_print("pl_dev or pl_info is invalid");
		return;
	}

	pl_info = pl_dev->pl_info;

	/* wipe first, then re-create the locks the wipe just destroyed */
	OS_MEMZERO(pl_info, sizeof(*pl_info));
	PKTLOG_LOCK_INIT(pl_info);
	mutex_init(&pl_info->pktlog_mutex);

	/* ini value is in megabytes; 0 means "use the 1 MB default" */
	buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE;

	pl_info->buf_size = (buff_size ? buff_size : ONE_MEGABYTE);
	pl_info->buf = NULL;
	pl_info->log_state = 0;
	pl_info->init_saved_state = 0;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	pl_info->sack_thr = PKTLOG_DEFAULT_SACK_THR;
	pl_info->tail_length = PKTLOG_DEFAULT_TAIL_LENGTH;
	pl_info->thruput_thresh = PKTLOG_DEFAULT_THRUPUT_THRESH;
	pl_info->per_thresh = PKTLOG_DEFAULT_PER_THRESH;
	pl_info->phyerr_thresh = PKTLOG_DEFAULT_PHYERR_THRESH;
	pl_info->trigger_interval = PKTLOG_DEFAULT_TRIGGER_INTERVAL;
	pl_info->pktlen = 0;
	pl_info->start_time_thruput = 0;
	pl_info->start_time_per = 0;
	pl_dev->vendor_cmd_send = false;

	pktlog_callback_registration(pl_dev->callback_type);
}
632
/**
 * __pktlog_enable() - Enable/disable pktlog (caller holds pktlog_mutex)
 * @scn: HIF context
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: request originates from INI configuration
 * @user_triggered: request originates from user action
 * @is_iwpriv_command: 0 for vendor-command origin, non-zero for iwpriv
 *
 * Allocates the host log buffer on first enable, subscribes the WDI
 * callbacks, then asks the firmware to start logging. A log_state of 0
 * routes to pl_funcs->pktlog_disable(). Note: the old "pdev_id < 0"
 * check was removed — pdev_id is unsigned so it was always false.
 *
 * Return: 0 on success, negative errno on failure.
 */
int __pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		    bool ini_triggered, uint8_t user_triggered,
		    uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t pdev_id;
	int error;

	if (!scn) {
		qdf_print("Invalid scn context");
		ASSERT(0);
		return -EINVAL;
	}

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("Invalid pktlog context");
		ASSERT(0);
		return -EINVAL;
	}

	pdev_id = WMI_PDEV_ID_SOC;

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("Invalid pl_info context");
		ASSERT(0);
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;
	/* is_iwpriv_command : 0 indicates its a vendor command
	 * log_state: 0 indicates pktlog disable command
	 * vendor_cmd_send flag; false means no vendor pktlog enable
	 * command was sent previously
	 */
	if (is_iwpriv_command == 0 && log_state == 0 &&
	    pl_dev->vendor_cmd_send == false) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("pktlog operation not in progress");
		return 0;
	}

	if (!pl_dev->tgt_pktlog_alloced) {
		if (!pl_info->buf) {
			error = pktlog_alloc_buf(scn);

			if (error != 0) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("pktlog buff alloc failed");
				return -ENOMEM;
			}

			/* defensive: alloc reported success but no buffer */
			if (!pl_info->buf) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("pktlog buf alloc failed");
				ASSERT(0);
				return -ENOMEM;
			}

		}

		/* stamp the fresh buffer header under the log lock */
		qdf_spin_lock_bh(&pl_info->log_lock);
		pl_info->buf->bufhdr.version = CUR_PKTLOG_VER;
		pl_info->buf->bufhdr.magic_num = PKTLOG_MAGIC_NUM;
		pl_info->buf->wr_offset = 0;
		pl_info->buf->rd_offset = -1;
		/* These below variables are used by per packet stats*/
		pl_info->buf->bytes_written = 0;
		pl_info->buf->msg_index = 1;
		pl_info->buf->offset = PKTLOG_READ_OFFSET;
		qdf_spin_unlock_bh(&pl_info->log_lock);

		pl_info->start_time_thruput = os_get_timestamp();
		pl_info->start_time_per = pl_info->start_time_thruput;

		pl_dev->tgt_pktlog_alloced = true;
	}
	if (log_state != 0) {
		/* WDI subscribe */
		if (!pl_dev->is_pktlog_cb_subscribed) {
			error = wdi_pktlog_subscribe(pdev_id, log_state);
			if (error) {
				pl_info->curr_pkt_state =
					PKTLOG_OPR_NOT_IN_PROGRESS;
				qdf_print("Unable to subscribe to the WDI");
				return -EINVAL;
			}
		} else {
			/* already subscribed: a second enable is an error */
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Unable to subscribe %d to the WDI",
				  log_state);
			return -EINVAL;
		}
		/* WMI command to enable pktlog on the firmware */
		if (pktlog_enable_tgt(scn, log_state, ini_triggered,
				      user_triggered)) {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("Device cannot be enabled");
			return -EINVAL;
		}
		pl_dev->is_pktlog_cb_subscribed = true;

		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = true;
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		pl_dev->pl_funcs->pktlog_disable(scn);
		if (is_iwpriv_command == 0)
			pl_dev->vendor_cmd_send = false;
	}

	pl_info->log_state = log_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	return 0;
}
760
/**
 * pktlog_enable() - Mutex-guarded wrapper around __pktlog_enable()
 * @scn: HIF context
 * @log_state: ATH_PKTLOG_* bitmap; 0 means disable
 * @ini_triggered: request originates from INI configuration
 * @user_triggered: request originates from user action
 * @is_iwpriv_command: 0 for vendor-command origin, non-zero for iwpriv
 *
 * Return: result of __pktlog_enable(), or -EINVAL on bad handles.
 */
int pktlog_enable(struct hif_opaque_softc *scn, int32_t log_state,
		  bool ini_triggered, uint8_t user_triggered,
		  uint32_t is_iwpriv_command)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info *pl_info;
	int ret;

	if (!pl_dev) {
		qdf_print("Invalid pl_dev handle");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("Invalid pl_info handle");
		return -EINVAL;
	}

	mutex_lock(&pl_info->pktlog_mutex);
	ret = __pktlog_enable(scn, log_state, ini_triggered,
			      user_triggered, is_iwpriv_command);
	mutex_unlock(&pl_info->pktlog_mutex);

	return ret;
}
789
/**
 * __pktlog_setsize() - Resize the pktlog buffer (caller holds pktlog_mutex)
 * @scn: HIF context
 * @size: requested buffer size in bytes; must be within
 *        [1 MB, cfg_dp_pktlog_buffer_size MB]
 *
 * Rejects the request while logging is active or another pktlog operation
 * is in flight; releases any existing buffer before recording the new
 * size (reallocation happens on the next enable). Fixes: the pl_info
 * null-check printed "Invalid pl_dev handle"; the always-false
 * "pdev_id < 0" check on an unsigned value was removed.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t pdev_id = WMI_PDEV_ID_SOC;
	void *soc = cds_get_context(QDF_MODULE_ID_SOC);
	uint32_t buff_size;
	uint32_t max_allowed_buff_size;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("Invalid pl_dev handle");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("Invalid pl_info handle");
		return -EINVAL;
	}

	if (pl_info->curr_pkt_state < PKTLOG_OPR_NOT_IN_PROGRESS) {
		qdf_print("pktlog is not configured");
		return -EBUSY;
	}

	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	/* upper bound follows the ini-configured size (MB), default 1 MB */
	buff_size = cdp_cfg_get(soc, cfg_dp_pktlog_buffer_size) * ONE_MEGABYTE;
	max_allowed_buff_size = (buff_size ? buff_size : ONE_MEGABYTE);

	if (size < ONE_MEGABYTE || size > max_allowed_buff_size) {
		qdf_print("Cannot Set Pktlog Buffer size of %d bytes.Min required is %d MB and Max allowed is %d MB",
			  size, (ONE_MEGABYTE / ONE_MEGABYTE),
			  (max_allowed_buff_size / ONE_MEGABYTE));
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Invalid requested buff size");
		return -EINVAL;
	}

	if (size == pl_info->buf_size) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Pktlog Buff Size is already of same size");
		return 0;
	}

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Logging should be disabled before changing buffer size");
		return -EINVAL;
	}

	qdf_spin_lock_bh(&pl_info->log_lock);
	if (pl_info->buf) {
		/* drop WDI subscriptions before freeing the old buffer */
		if (pl_dev->is_pktlog_cb_subscribed &&
		    wdi_pktlog_unsubscribe(pdev_id, pl_info->log_state)) {
			pl_info->curr_pkt_state =
				PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_spin_unlock_bh(&pl_info->log_lock);
			qdf_print("Cannot unsubscribe pktlog from the WDI");
			return -EFAULT;
		}
		pktlog_release_buf(scn);
		pl_dev->is_pktlog_cb_subscribed = false;
		pl_dev->tgt_pktlog_alloced = false;
	}

	if (size != 0) {
		qdf_print("New Pktlog Buff Size is %d", size);
		pl_info->buf_size = size;
	}
	pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
	qdf_spin_unlock_bh(&pl_info->log_lock);
	return 0;
}
872
/**
 * pktlog_setsize() - Mutex-guarded wrapper around __pktlog_setsize()
 * @scn: HIF context
 * @size: requested buffer size in bytes
 *
 * Return: result of __pktlog_setsize(), or -EINVAL on bad handles.
 */
int pktlog_setsize(struct hif_opaque_softc *scn, int32_t size)
{
	struct pktlog_dev_t *pl_dev = get_pktlog_handle();
	struct ath_pktlog_info *pl_info;
	int ret;

	if (!pl_dev) {
		qdf_print("Invalid pl_dev handle");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("Invalid pl_dev handle");
		return -EINVAL;
	}

	mutex_lock(&pl_info->pktlog_mutex);
	ret = __pktlog_setsize(scn, size);
	mutex_unlock(&pl_info->pktlog_mutex);

	return ret;
}
899
/**
 * pktlog_clearbuff() - Zero the pktlog buffer after a completed read
 * @scn: HIF context (unused directly; kept for interface compatibility)
 * @clear_buff: must be true; false is rejected with -EINVAL
 *
 * Only valid once a read has completed and logging is disabled; the
 * buffer contents and read offset are reset so a new capture can start.
 * Fix: the pl_info null-check printed "Invalid pl_dev handle".
 *
 * Return: 0 on success, negative errno on failure.
 */
int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
{
	struct pktlog_dev_t *pl_dev;
	struct ath_pktlog_info *pl_info;
	uint8_t save_pktlog_state;

	pl_dev = get_pktlog_handle();
	if (!pl_dev) {
		qdf_print("Invalid pl_dev handle");
		return -EINVAL;
	}

	pl_info = pl_dev->pl_info;
	if (!pl_info) {
		qdf_print("Invalid pl_info handle");
		return -EINVAL;
	}

	if (!clear_buff)
		return -EINVAL;

	/* only legal right after a read completed, and only once */
	if (pl_info->curr_pkt_state < PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE ||
	    pl_info->curr_pkt_state ==
		PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE)
		return -EBUSY;

	save_pktlog_state = pl_info->curr_pkt_state;
	pl_info->curr_pkt_state = PKTLOG_OPR_IN_PROGRESS;

	if (pl_info->log_state) {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("Logging should be disabled before clearing pktlog buffer");
		return -EINVAL;
	}

	if (pl_info->buf) {
		if (pl_info->buf_size > 0) {
			qdf_debug("pktlog buffer is cleared");
			memset(pl_info->buf, 0, pl_info->buf_size);
			pl_dev->is_pktlog_cb_subscribed = false;
			pl_dev->tgt_pktlog_alloced = false;
			pl_info->buf->rd_offset = -1;
		} else {
			pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
			qdf_print("pktlog buffer size is not proper. "
				  "Existing Buf size %d",
				  pl_info->buf_size);
			return -EFAULT;
		}
	} else {
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;
		qdf_print("pktlog buff is NULL");
		return -EFAULT;
	}

	if (save_pktlog_state == PKTLOG_OPR_IN_PROGRESS_READ_COMPLETE)
		pl_info->curr_pkt_state =
			PKTLOG_OPR_IN_PROGRESS_CLEARBUFF_COMPLETE;
	else
		pl_info->curr_pkt_state = PKTLOG_OPR_NOT_IN_PROGRESS;

	return 0;
}
965
/**
 * pktlog_process_fw_msg() - Route a raw firmware pktlog record to WDI
 * @pdev_id: datapath physical device id
 * @buff: raw record; word 1 carries the log type in
 *        ATH_PKTLOG_HDR_LOG_TYPE_MASK
 * @len: record length in bytes
 *
 * Decodes the log type from the header and raises the matching WDI event;
 * unrecognized types are dropped silently.
 */
void pktlog_process_fw_msg(uint8_t pdev_id, uint32_t *buff, uint32_t len)
{
	uint32_t *pl_hdr = buff;
	uint32_t log_type;
	struct ol_fw_data pl_fw_data;

	if (pdev_id == OL_TXRX_INVALID_PDEV_ID) {
		qdf_print("txrx pdev_id is invalid");
		return;
	}

	pl_fw_data.data = pl_hdr;
	pl_fw_data.len = len;

	log_type = (*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
		   ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;

	switch (log_type) {
	case PKTLOG_TYPE_TX_CTRL:
	case PKTLOG_TYPE_TX_STAT:
	case PKTLOG_TYPE_TX_MSDU_ID:
	case PKTLOG_TYPE_TX_FRM_HDR:
	case PKTLOG_TYPE_TX_VIRT_ADDR:
		/* every TX-side record maps onto one WDI event */
		wdi_event_handler(WDI_EVENT_TX_STATUS,
				  pdev_id, &pl_fw_data);
		break;
	case PKTLOG_TYPE_RC_FIND:
		wdi_event_handler(WDI_EVENT_RATE_FIND,
				  pdev_id, &pl_fw_data);
		break;
	case PKTLOG_TYPE_RC_UPDATE:
		wdi_event_handler(WDI_EVENT_RATE_UPDATE,
				  pdev_id, &pl_fw_data);
		break;
	case PKTLOG_TYPE_RX_STAT:
		wdi_event_handler(WDI_EVENT_RX_DESC,
				  pdev_id, &pl_fw_data);
		break;
	case PKTLOG_TYPE_SW_EVENT:
		wdi_event_handler(WDI_EVENT_SW_EVENT,
				  pdev_id, &pl_fw_data);
		break;
	default:
		break;
	}
}
1004
1005 #if defined(QCA_WIFI_3_0_ADRASTEA)
/* Reject NULL or obviously-corrupted skbs (data outside head..end). */
static inline int pktlog_nbuf_check_sanity(qdf_nbuf_t nbuf)
{
	if (!nbuf)
		return -EINVAL;
	if (nbuf->data < nbuf->head)
		return -EINVAL;
	if ((nbuf->data + skb_headlen(nbuf)) > skb_end_pointer(nbuf))
		return -EINVAL;

	return 0;	/* sane */
}
1017 /**
1018 * pktlog_t2h_msg_handler() - Target to host message handler
1019 * @context: pdev context
1020 * @pkt: HTC packet
1021 *
1022 * Return: None
1023 */
static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
{
	struct pktlog_dev_t *pdev = (struct pktlog_dev_t *)context;
	qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
	uint32_t *msg_word;
	uint32_t msg_len;

	/* check for sanity of the packet, have seen corrupted pkts */
	if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
		qdf_print("packet 0x%pK corrupted? Leaking...",
			  pktlog_t2h_msg);
		/* do not free; may crash! */
		QDF_ASSERT(0);
		return;
	}

	/* check for successful message reception */
	if (pkt->Status != QDF_STATUS_SUCCESS) {
		/* cancellation is expected during teardown; anything else
		 * counts as an HTC receive error
		 */
		if (pkt->Status != QDF_STATUS_E_CANCELED)
			pdev->htc_err_cnt++;
		qdf_nbuf_free(pktlog_t2h_msg);
		return;
	}

	/* confirm alignment */
	qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);

	/* hand the raw firmware record to the pktlog dispatcher, then
	 * release the receive buffer
	 */
	msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
	msg_len = qdf_nbuf_len(pktlog_t2h_msg);
	pktlog_process_fw_msg(pdev->pdev_id, msg_word, msg_len);

	qdf_nbuf_free(pktlog_t2h_msg);
}
1057
1058 /**
1059 * pktlog_tx_resume_handler() - resume callback
1060 * @context: pdev context
1061 *
1062 * Return: None
1063 */
static void pktlog_tx_resume_handler(void *context)
{
	/* pktlog never suspends its TX queue, so HTC must not call this */
	qdf_print("Not expected");
	qdf_assert(0);
}
1069
1070 /**
1071 * pktlog_h2t_send_complete() - send complete indication
1072 * @context: pdev context
1073 * @htc_pkt: HTC packet
1074 *
1075 * Return: None
1076 */
static void pktlog_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
{
	/* pktlog never sends host-to-target packets on this endpoint */
	qdf_print("Not expected");
	qdf_assert(0);
}
1082
1083 /**
1084 * pktlog_h2t_full() - queue full indication
1085 * @context: pdev context
1086 * @pkt: HTC packet
1087 *
1088 * Return: HTC action
1089 */
static enum htc_send_full_action pktlog_h2t_full(void *context, HTC_PACKET *pkt)
{
	/* never drop: keep queuing even when the send queue is full */
	return HTC_SEND_FULL_KEEP;
}
1094
1095 /**
1096 * pktlog_htc_connect_service() - create new endpoint for packetlog
1097 * @pdev - pktlog pdev
1098 *
1099 * Return: 0 for success/failure
1100 */
static int pktlog_htc_connect_service(struct pktlog_dev_t *pdev)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp response;
	QDF_STATUS status;

	qdf_mem_zero(&connect, sizeof(connect));
	qdf_mem_zero(&response, sizeof(response));

	connect.pMetaData = NULL;
	connect.MetaDataLength = 0;
	connect.EpCallbacks.pContext = pdev;
	/* TX-side callbacks are only assert stubs; pktlog is RX-only */
	connect.EpCallbacks.EpTxComplete = pktlog_h2t_send_complete;
	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
	connect.EpCallbacks.EpRecv = pktlog_t2h_msg_handler;
	connect.EpCallbacks.ep_resume_tx_queue = pktlog_tx_resume_handler;

	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
	connect.EpCallbacks.EpRecvRefill = NULL;
	connect.EpCallbacks.RecvRefillWaterMark = 1;
	/* N/A, fill is done by HIF */

	connect.EpCallbacks.EpSendFull = pktlog_h2t_full;
	/*
	 * Specify how deep to let a queue get before htc_send_pkt will
	 * call the EpSendFull function due to excessive send queue depth.
	 */
	connect.MaxSendQueueDepth = PKTLOG_MAX_SEND_QUEUE_DEPTH;

	/* disable flow control for HTT data message service */
	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;

	/* connect to control service */
	connect.service_id = PACKET_LOG_SVC;

	status = htc_connect_service(pdev->htc_pdev, &connect, &response);

	if (status != QDF_STATUS_SUCCESS) {
		/* leave multi-target pktlog off when the service is absent */
		pdev->mt_pktlog_enabled = false;
		return -EIO;    /* failure */
	}

	pdev->htc_endpoint = response.Endpoint;
	pdev->mt_pktlog_enabled = true;

	return 0;               /* success */
}
1148
1149 /**
1150 * pktlog_htc_attach() - attach pktlog HTC service
1151 *
1152 * Return: 0 for success/failure
1153 */
pktlog_htc_attach(void)1154 int pktlog_htc_attach(void)
1155 {
1156 struct pktlog_dev_t *pl_pdev = get_pktlog_handle();
1157 void *htc_pdev = cds_get_context(QDF_MODULE_ID_HTC);
1158
1159 if ((!pl_pdev) || (!htc_pdev)) {
1160 qdf_print("Invalid pl_dev or htc_pdev handle");
1161 return -EINVAL;
1162 }
1163
1164 pl_pdev->htc_pdev = htc_pdev;
1165 return pktlog_htc_connect_service(pl_pdev);
1166 }
1167 #else
pktlog_htc_attach(void)1168 int pktlog_htc_attach(void)
1169 {
1170 struct pktlog_dev_t *pl_dev = get_pktlog_handle();
1171
1172 if (!pl_dev) {
1173 qdf_print("Invalid pl_dev handle");
1174 return -EINVAL;
1175 }
1176
1177 pl_dev->mt_pktlog_enabled = false;
1178 return 0;
1179 }
1180 #endif
1181 #endif /* REMOVE_PKT_LOG */
1182