1 /*
2 * Copyright (c) 2012-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /**
21 * DOC: cds_api.c
22 *
23 * Connectivity driver services APIs
24 */
25
26 #include <cds_api.h>
27 #include "sir_types.h"
28 #include "sir_api.h"
29 #include "sir_mac_prot_def.h"
30 #include "sme_api.h"
31 #include "mac_init_api.h"
32 #include "wlan_qct_sys.h"
33 #include "i_cds_packet.h"
34 #include "cds_reg_service.h"
35 #include "wma_types.h"
36 #include "wlan_hdd_main.h"
37 #include "wlan_hdd_power.h"
38 #include "wlan_hdd_tsf.h"
39 #include <linux/vmalloc.h>
40 #if (defined(__ANDROID_COMMON_KERNEL__) && \
41 (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && \
42 defined(MSM_PLATFORM))
43 #include <linux/qcom-iommu-util.h>
44 #endif
45 #include <scheduler_core.h>
46
47 #include "pld_common.h"
48 #include "sap_api.h"
49 #include "bmi.h"
50 #include "ol_fw.h"
51 #include "ol_if_athvar.h"
52 #include "hif.h"
53 #include "wlan_policy_mgr_api.h"
54 #include "cds_utils.h"
55 #include "wlan_logging_sock_svc.h"
56 #include "wma.h"
57 #include "pktlog_ac.h"
59
60 #include <cdp_txrx_cmn_reg.h>
61 #include <cdp_txrx_cfg.h>
62 #include <cdp_txrx_misc.h>
63 #include <ol_defines.h>
64 #include <dispatcher_init_deinit.h>
65 #include <cdp_txrx_handle.h>
66 #include <cdp_txrx_host_stats.h>
67 #include "target_type.h"
68 #include "wlan_ocb_ucfg_api.h"
69 #include "wlan_ipa_ucfg_api.h"
70
71 #ifdef ENABLE_SMMU_S1_TRANSLATION
72 #include "pld_common.h"
73 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
74 #include <asm/dma-iommu.h>
75 #endif
76 #include <linux/iommu.h>
77 #endif
78
79 #ifdef QCA_WIFI_QCA8074
80 #include <target_if_dp.h>
81 #endif
82 #include "wlan_mlme_ucfg_api.h"
83 #include "cfg_ucfg_api.h"
84 #include "wlan_cp_stats_mc_ucfg_api.h"
85 #include <qdf_hang_event_notifier.h>
86 #include <qdf_notifier.h>
87 #include <qwlan_version.h>
88 #include <qdf_trace.h>
89 #include <qdf_nbuf.h>
90 #include "wlan_dp_ucfg_api.h"
91 #include "wlan_dp_prealloc.h"
92 #include "wlan_dp_api.h"
93 #include "qdf_ipa.h"
94
95 /* Preprocessor Definitions and Constants */
96
99 /* Data definitions */
100 static struct cds_context g_cds_context;
101 static struct cds_context *gp_cds_context;
102 static struct __qdf_device g_qdf_ctx;
103
104 static uint8_t cds_multicast_logging;
105
106 #define DRIVER_VER_LEN (11)
107 #define HANG_EVENT_VER_LEN (1)
108
109 struct cds_hang_event_fixed_param {
110 uint16_t tlv_header;
111 uint8_t recovery_reason;
112 char driver_version[DRIVER_VER_LEN];
113 char hang_event_version[HANG_EVENT_VER_LEN];
114 } qdf_packed;
115
116 #ifdef QCA_WIFI_QCA8074
117 static inline int
118 cds_send_delba(struct cdp_ctrl_objmgr_psoc *psoc,
119 uint8_t vdev_id, uint8_t *peer_macaddr,
120 uint8_t tid, uint8_t reason_code,
121 uint8_t cdp_reason_code)
122 {
123 return wma_dp_send_delba_ind(vdev_id, peer_macaddr, tid,
124 reason_code, cdp_reason_code);
125 }
126
127 static struct ol_if_ops dp_ol_if_ops = {
128 .peer_set_default_routing = target_if_peer_set_default_routing,
129 .peer_rx_reorder_queue_setup = target_if_peer_rx_reorder_queue_setup,
130 .peer_rx_reorder_queue_remove = target_if_peer_rx_reorder_queue_remove,
131 .peer_multi_rx_reorder_queue_setup =
132 target_if_peer_multi_rx_reorder_queue_setup,
133 .is_hw_dbs_capable = policy_mgr_is_dp_hw_dbs_capable,
134 .lro_hash_config = target_if_lro_hash_config,
135 .rx_invalid_peer = wma_rx_invalid_peer_ind,
136 .is_roam_inprogress = wma_is_roam_in_progress,
137 .get_con_mode = cds_get_conparam,
138 .send_delba = cds_send_delba,
139 .dp_rx_get_pending = dp_rx_tm_get_pending,
140 #ifdef DP_MEM_PRE_ALLOC
141 .dp_prealloc_get_context = dp_prealloc_get_context_memory,
142 .dp_prealloc_put_context = dp_prealloc_put_context_memory,
143 .dp_prealloc_get_consistent = dp_prealloc_get_coherent,
144 .dp_prealloc_put_consistent = dp_prealloc_put_coherent,
145 .dp_get_multi_pages = dp_prealloc_get_multi_pages,
146 .dp_put_multi_pages = dp_prealloc_put_multi_pages,
147 #endif
148 .dp_get_tx_inqueue = dp_get_tx_inqueue,
149 .dp_send_unit_test_cmd = wma_form_unit_test_cmd_and_send,
150 .dp_print_fisa_stats = wlan_dp_print_fisa_rx_stats,
151 /* TODO: Add any other control path calls required to OL_IF/WMA layer */
152 };
153 #else
154 static struct ol_if_ops dp_ol_if_ops = {
155 .dp_rx_get_pending = cds_get_rx_thread_pending,
156 };
157 #endif
158
159 static void cds_trigger_recovery_work(void *param);
160
161 /**
162 * struct cds_recovery_call_info - caller information for cds_trigger_recovery
163 * @func: caller's function name
164 * @line: caller's line number
165 */
166 struct cds_recovery_call_info {
167 const char *func;
168 uint32_t line;
169 } __cds_recovery_caller;
170
171 /**
172 * cds_recovery_work_init() - Initialize recovery work queue
173 *
174  * Return: QDF_STATUS_SUCCESS on success, else an error status
175 */
176 static QDF_STATUS cds_recovery_work_init(void)
177 {
178 qdf_create_work(0, &gp_cds_context->cds_recovery_work,
179 cds_trigger_recovery_work, &__cds_recovery_caller);
180 gp_cds_context->cds_recovery_wq =
181 qdf_create_workqueue("cds_recovery_workqueue");
182 if (!gp_cds_context->cds_recovery_wq) {
183 cds_err("Failed to create cds_recovery_workqueue");
184 return QDF_STATUS_E_FAILURE;
185 }
186
187 return QDF_STATUS_SUCCESS;
188 }
189
190 /**
191  * cds_recovery_work_deinit() - Deinitialize recovery work queue
192 *
193 * Return: none
194 */
195 static void cds_recovery_work_deinit(void)
196 {
197 if (gp_cds_context->cds_recovery_wq) {
198 qdf_flush_workqueue(0, gp_cds_context->cds_recovery_wq);
199 qdf_destroy_workqueue(0, gp_cds_context->cds_recovery_wq);
200 }
201 }
202
203 static bool cds_is_drv_connected(void)
204 {
205 int ret;
206 qdf_device_t qdf_ctx;
207
208 qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
209 if (!qdf_ctx)
210 return false;
211
212 ret = pld_is_drv_connected(qdf_ctx->dev);
213
214 return ((ret > 0) ? true : false);
215 }
216
217 static bool cds_is_drv_supported(void)
218 {
219 qdf_device_t qdf_ctx;
220 struct pld_platform_cap cap = {0};
221
222 qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
223 if (!qdf_ctx)
224 return false;
225
226 pld_get_platform_cap(qdf_ctx->dev, &cap);
227
228 return ((cap.cap_flag & PLD_HAS_DRV_SUPPORT) ? true : false);
229 }
230
231 static QDF_STATUS cds_wmi_send_recv_qmi(void *buf, uint32_t len, void *cb_ctx,
232 qdf_wmi_recv_qmi_cb wmi_rx_cb)
233 {
234 qdf_device_t qdf_ctx;
235
236 qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
237 if (!qdf_ctx)
238 return QDF_STATUS_E_INVAL;
239
240 if (pld_qmi_send(qdf_ctx->dev, 0, buf, len, cb_ctx, wmi_rx_cb))
241 return QDF_STATUS_E_INVAL;
242
243 return QDF_STATUS_SUCCESS;
244 }
245
246 static QDF_STATUS cds_qmi_indication(void *cb_ctx, qdf_qmi_ind_cb qmi_ind_cb)
247 {
248 qdf_device_t qdf_ctx;
249
250 qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
251 if (!qdf_ctx)
252 return QDF_STATUS_E_INVAL;
253
254 if (pld_qmi_indication(qdf_ctx->dev, cb_ctx, qmi_ind_cb))
255 return QDF_STATUS_E_INVAL;
256
257 return QDF_STATUS_SUCCESS;
258 }
259
260 /**
261 * cds_update_recovery_reason() - update the recovery reason code
262 * @recovery_reason: recovery reason
263 *
264 * Return: None
265 */
266 static void cds_update_recovery_reason(enum qdf_hang_reason recovery_reason)
267 {
268 if (!gp_cds_context) {
269 cds_err("gp_cds_context is null");
270 return;
271 }
272
273 gp_cds_context->recovery_reason = recovery_reason;
274 }
275
276 /**
277 * cds_sys_reboot_lock_init() - Create lock for system reboot
278 *
279 * Return: QDF_STATUS_SUCCESS if the lock was created and an error on failure
280 */
281 static QDF_STATUS cds_sys_reboot_lock_init(void)
282 {
283 return qdf_mutex_create(&gp_cds_context->sys_reboot_lock);
284 }
285
286 /**
287 * cds_sys_reboot_lock_deinit() - destroy lock for system reboot
288 *
289 * Return: none
290 */
291 static void cds_sys_reboot_lock_deinit(void)
292 {
293 qdf_mutex_destroy(&gp_cds_context->sys_reboot_lock);
294 }
295
296 void cds_set_sys_rebooting(void)
297 {
298 qdf_mutex_acquire(&gp_cds_context->sys_reboot_lock);
299 cds_set_driver_state(CDS_DRIVER_STATE_SYS_REBOOTING);
300 qdf_mutex_release(&gp_cds_context->sys_reboot_lock);
301 }
302
303 bool cds_sys_reboot_protect(void)
304 {
305 enum cds_driver_state state;
306
307 qdf_mutex_acquire(&gp_cds_context->sys_reboot_lock);
308
309 state = cds_get_driver_state();
310 return __CDS_IS_DRIVER_STATE(state, CDS_DRIVER_STATE_SYS_REBOOTING);
311 }
312
313 void cds_sys_reboot_unprotect(void)
314 {
315 qdf_mutex_release(&gp_cds_context->sys_reboot_lock);
316 }
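
/*
 * Usage sketch (hypothetical caller, not part of this file): code that must
 * not run concurrently with a system reboot brackets its work with the
 * protect/unprotect pair above and bails out early when the reboot state is
 * already set. do_reboot_sensitive_work() below is a hypothetical helper.
 *
 *	if (cds_sys_reboot_protect()) {
 *		cds_sys_reboot_unprotect();
 *		return;
 *	}
 *	do_reboot_sensitive_work();
 *	cds_sys_reboot_unprotect();
 */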
317
318 QDF_STATUS cds_init(void)
319 {
320 QDF_STATUS status;
321
322 gp_cds_context = &g_cds_context;
323
324 status = cds_sys_reboot_lock_init();
325 if (QDF_IS_STATUS_ERROR(status)) {
326 cds_err("Failed to init sys reboot lock; status:%u", status);
327 goto deinit;
328 }
329
330 status = cds_recovery_work_init();
331 if (QDF_IS_STATUS_ERROR(status)) {
332 cds_err("Failed to init recovery work; status:%u", status);
333 goto destroy_lock;
334 }
335
336 cds_ssr_protect_init();
337
338 gp_cds_context->qdf_ctx = &g_qdf_ctx;
339
340 qdf_register_self_recovery_callback(cds_trigger_recovery_psoc);
341 qdf_register_fw_down_callback(cds_is_fw_down);
342 qdf_register_is_driver_unloading_callback(cds_is_driver_unloading);
343 qdf_register_is_driver_state_module_stop_callback(
344 cds_is_driver_state_module_stop);
345 qdf_register_recovering_state_query_callback(cds_is_driver_recovering);
346 qdf_register_drv_connected_callback(cds_is_drv_connected);
347 qdf_register_drv_supported_callback(cds_is_drv_supported);
348 qdf_register_wmi_send_recv_qmi_callback(cds_wmi_send_recv_qmi);
349 qdf_register_qmi_indication_callback(cds_qmi_indication);
350 qdf_register_recovery_reason_update(cds_update_recovery_reason);
351 qdf_register_get_bus_reg_dump(pld_get_bus_reg_dump);
352
353 return QDF_STATUS_SUCCESS;
354
355 destroy_lock:
356 cds_sys_reboot_lock_deinit();
357 deinit:
358 gp_cds_context = NULL;
359 qdf_mem_zero(&g_cds_context, sizeof(g_cds_context));
360
361 return status;
362 }
363
364 /**
365 * cds_deinit() - Deinitialize CDS
366 *
367 * This function frees the CDS resources
368 */
369 void cds_deinit(void)
370 {
371 QDF_BUG(gp_cds_context);
372 if (!gp_cds_context)
373 return;
374
375 qdf_register_get_bus_reg_dump(NULL);
376 qdf_register_recovery_reason_update(NULL);
377 qdf_register_recovering_state_query_callback(NULL);
378 qdf_register_fw_down_callback(NULL);
379 qdf_register_is_driver_unloading_callback(NULL);
380 qdf_register_is_driver_state_module_stop_callback(NULL);
381 qdf_register_self_recovery_callback(NULL);
382 qdf_register_wmi_send_recv_qmi_callback(NULL);
383 qdf_register_qmi_indication_callback(NULL);
384
385 gp_cds_context->qdf_ctx = NULL;
386 qdf_mem_zero(&g_qdf_ctx, sizeof(g_qdf_ctx));
387
388 /* currently, no ssr_protect_deinit */
389
390 cds_recovery_work_deinit();
391 cds_sys_reboot_lock_deinit();
392
393 gp_cds_context = NULL;
394 qdf_mem_zero(&g_cds_context, sizeof(g_cds_context));
395 }
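
/*
 * Pairing sketch (illustrative; the module init/exit placement is an
 * assumption): cds_init() and cds_deinit() bracket the lifetime of the
 * global CDS context, e.g.
 *
 *	if (QDF_IS_STATUS_ERROR(cds_init()))
 *		return -EINVAL;
 *	...
 *	cds_deinit();
 */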
396
397 #ifdef FEATURE_WLAN_DIAG_SUPPORT
398 /**
399  * cds_tdls_tx_rx_mgmt_event() - send tdls mgmt rx tx event
400 * @event_id: event id
401 * @tx_rx: tx or rx
402 * @type: type of frame
403 * @action_sub_type: action frame type
404 * @peer_mac: peer mac
405 *
406 * This Function sends tdls mgmt rx tx diag event
407 *
408 * Return: void.
409 */
410 void cds_tdls_tx_rx_mgmt_event(uint8_t event_id, uint8_t tx_rx,
411 uint8_t type, uint8_t action_sub_type, uint8_t *peer_mac)
412 {
413 WLAN_HOST_DIAG_EVENT_DEF(tdls_tx_rx_mgmt,
414 struct host_event_tdls_tx_rx_mgmt);
415
416 tdls_tx_rx_mgmt.event_id = event_id;
417 tdls_tx_rx_mgmt.tx_rx = tx_rx;
418 tdls_tx_rx_mgmt.type = type;
419 tdls_tx_rx_mgmt.action_sub_type = action_sub_type;
420 qdf_mem_copy(tdls_tx_rx_mgmt.peer_mac,
421 peer_mac, CDS_MAC_ADDRESS_LEN);
422 WLAN_HOST_DIAG_EVENT_REPORT(&tdls_tx_rx_mgmt,
423 EVENT_WLAN_TDLS_TX_RX_MGMT);
424 }
425 #endif
426
427 /**
428 * cds_cfg_update_ac_specs_params() - update ac_specs params
429 * @olcfg: cfg handle
430 * @cds_cfg: pointer to cds config
431 *
432 * Return: none
433 */
434 static void
435 cds_cfg_update_ac_specs_params(struct txrx_pdev_cfg_param_t *olcfg,
436 struct cds_config_info *cds_cfg)
437 {
438 int i;
439
440 if (!olcfg)
441 return;
442
443 if (!cds_cfg)
444 return;
445
446 for (i = 0; i < QCA_WLAN_AC_ALL; i++) {
447 olcfg->ac_specs[i].wrr_skip_weight =
448 cds_cfg->ac_specs[i].wrr_skip_weight;
449 olcfg->ac_specs[i].credit_threshold =
450 cds_cfg->ac_specs[i].credit_threshold;
451 olcfg->ac_specs[i].send_limit =
452 cds_cfg->ac_specs[i].send_limit;
453 olcfg->ac_specs[i].credit_reserve =
454 cds_cfg->ac_specs[i].credit_reserve;
455 olcfg->ac_specs[i].discard_weight =
456 cds_cfg->ac_specs[i].discard_weight;
457 }
458 }
459
460 #if defined(QCA_LL_TX_FLOW_CONTROL_V2) || defined(QCA_LL_PDEV_TX_FLOW_CONTROL)
461 static inline void
462 cds_cdp_set_flow_control_params(struct wlan_objmgr_psoc *psoc,
463 struct txrx_pdev_cfg_param_t *cdp_cfg)
464 {
465 cdp_cfg->tx_flow_stop_queue_th =
466 cfg_get(psoc, CFG_DP_TX_FLOW_STOP_QUEUE_TH);
467 cdp_cfg->tx_flow_start_queue_offset =
468 cfg_get(psoc, CFG_DP_TX_FLOW_START_QUEUE_OFFSET);
469 }
470 #else
471 static inline void
472 cds_cdp_set_flow_control_params(struct wlan_objmgr_psoc *psoc,
473 struct txrx_pdev_cfg_param_t *cdp_cfg)
474 {}
475 #endif
476
477 #ifdef QCA_SUPPORT_TXRX_DRIVER_TCP_DEL_ACK
478 static inline void
479 cds_cdp_update_del_ack_params(struct wlan_objmgr_psoc *psoc,
480 struct txrx_pdev_cfg_param_t *cdp_cfg)
481 {
482 cdp_cfg->del_ack_enable =
483 cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_ENABLE);
484 cdp_cfg->del_ack_pkt_count =
485 cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_PKT_CNT);
486 cdp_cfg->del_ack_timer_value =
487 cfg_get(psoc, CFG_DP_DRIVER_TCP_DELACK_TIMER_VALUE);
488 }
489 #else
490 static inline void
491 cds_cdp_update_del_ack_params(struct wlan_objmgr_psoc *psoc,
492 struct txrx_pdev_cfg_param_t *cdp_cfg)
493 {}
494 #endif
495
496 #ifdef WLAN_SUPPORT_TXRX_HL_BUNDLE
497 static inline void
498 cds_cdp_update_bundle_params(struct wlan_objmgr_psoc *psoc,
499 struct txrx_pdev_cfg_param_t *cdp_cfg)
500 {
501 cdp_cfg->bundle_timer_value =
502 cfg_get(psoc, CFG_DP_HL_BUNDLE_TIMER_VALUE);
503 cdp_cfg->bundle_size =
504 cfg_get(psoc, CFG_DP_HL_BUNDLE_SIZE);
505 }
506 #else
507 static inline void
508 cds_cdp_update_bundle_params(struct wlan_objmgr_psoc *psoc,
509 struct txrx_pdev_cfg_param_t *cdp_cfg)
510 {
511 }
512 #endif
513
514 /**
515 * cds_cdp_cfg_attach() - attach data path config module
516 * @psoc: psoc handle
517 *
518 * Return: none
519 */
520 static void cds_cdp_cfg_attach(struct wlan_objmgr_psoc *psoc)
521 {
522 struct txrx_pdev_cfg_param_t cdp_cfg = {0};
523 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
524 uint32_t gro_bit_set;
525
526 cdp_cfg.is_full_reorder_offload = DP_REORDER_OFFLOAD_SUPPORT;
527 cdp_cfg.is_uc_offload_enabled = ucfg_ipa_uc_is_enabled();
528 cdp_cfg.uc_tx_buffer_count = cfg_get(psoc, CFG_DP_IPA_UC_TX_BUF_COUNT);
529 cdp_cfg.uc_tx_buffer_size =
530 cfg_get(psoc, CFG_DP_IPA_UC_TX_BUF_SIZE);
531 cdp_cfg.uc_rx_indication_ring_count =
532 cfg_get(psoc, CFG_DP_IPA_UC_RX_IND_RING_COUNT);
533 cdp_cfg.uc_tx_partition_base =
534 cfg_get(psoc, CFG_DP_IPA_UC_TX_PARTITION_BASE);
535 cdp_cfg.enable_rxthread = ucfg_dp_is_rx_common_thread_enabled(psoc);
536 cdp_cfg.ip_tcp_udp_checksum_offload =
537 cfg_get(psoc, CFG_DP_TCP_UDP_CKSUM_OFFLOAD);
538 cdp_cfg.nan_ip_tcp_udp_checksum_offload =
539 cfg_get(psoc, CFG_DP_NAN_TCP_UDP_CKSUM_OFFLOAD);
540 cdp_cfg.p2p_ip_tcp_udp_checksum_offload =
541 cfg_get(psoc, CFG_DP_P2P_TCP_UDP_CKSUM_OFFLOAD);
542 cdp_cfg.legacy_mode_csum_disable =
543 cfg_get(psoc, CFG_DP_LEGACY_MODE_CSUM_DISABLE);
544 cdp_cfg.ce_classify_enabled =
545 cfg_get(psoc, CFG_DP_CE_CLASSIFY_ENABLE);
546 cdp_cfg.tso_enable = cfg_get(psoc, CFG_DP_TSO);
547 cdp_cfg.lro_enable = cfg_get(psoc, CFG_DP_LRO);
548 cdp_cfg.sg_enable = cfg_get(psoc, CFG_DP_SG);
549 cdp_cfg.enable_data_stall_detection =
550 cfg_get(psoc, CFG_DP_ENABLE_DATA_STALL_DETECTION);
551 gro_bit_set = cfg_get(psoc, CFG_DP_GRO);
552 if (gro_bit_set & DP_GRO_ENABLE_BIT_SET)
553 cdp_cfg.gro_enable = true;
554 cdp_cfg.enable_flow_steering =
555 cfg_get(psoc, CFG_DP_FLOW_STEERING_ENABLED);
556 cdp_cfg.disable_intra_bss_fwd =
557 cfg_get(psoc, CFG_DP_AP_STA_SECURITY_SEPERATION);
558 cdp_cfg.pktlog_buffer_size =
559 cfg_get(psoc, CFG_DP_PKTLOG_BUFFER_SIZE);
560
561 cds_cdp_update_del_ack_params(psoc, &cdp_cfg);
562
563 cds_cdp_update_bundle_params(psoc, &cdp_cfg);
564
565 gp_cds_context->cfg_ctx = cdp_cfg_attach(soc, gp_cds_context->qdf_ctx,
566 (void *)(&cdp_cfg));
567 if (!gp_cds_context->cfg_ctx) {
568 cds_debug("failed to init cfg handle");
569 return;
570 }
571
572 /* Configure Receive flow steering */
573 cdp_cfg_set_flow_steering(soc, gp_cds_context->cfg_ctx,
574 cfg_get(psoc, CFG_DP_FLOW_STEERING_ENABLED));
575
576 cds_cdp_set_flow_control_params(psoc, &cdp_cfg);
577 cdp_cfg_set_flow_control_parameters(soc, gp_cds_context->cfg_ctx,
578 (void *)&cdp_cfg);
579
580 /* adjust the cfg_ctx default value based on setting */
581 cdp_cfg_set_rx_fwd_disabled(soc, gp_cds_context->cfg_ctx,
582 cfg_get(psoc,
583 CFG_DP_AP_STA_SECURITY_SEPERATION));
584
585 /*
586 * adjust the packet log enable default value
587 * based on CFG INI setting
588 */
589 cdp_cfg_set_packet_log_enabled(soc, gp_cds_context->cfg_ctx,
590 (uint8_t)cds_is_packet_log_enabled());
591
592 /* adjust the ptp rx option default value based on CFG INI setting */
593 cdp_cfg_set_ptp_rx_opt_enabled(soc, gp_cds_context->cfg_ctx,
594 (uint8_t)cds_is_ptp_rx_opt_enabled());
595 }

596 static QDF_STATUS cds_register_all_modules(void)
597 {
598 QDF_STATUS status;
599
600 scheduler_register_wma_legacy_handler(&wma_mc_process_handler);
601 scheduler_register_sys_legacy_handler(&sys_mc_process_handler);
602
603 /* Register message queues in given order such that queue priority is
604 * intact:
605 * 1) QDF_MODULE_ID_SYS: Timer queue(legacy SYS queue)
606 * 2) QDF_MODULE_ID_TARGET_IF: Target interface queue
607 * 3) QDF_MODULE_ID_PE: Legacy PE message queue
608 * 4) QDF_MODULE_ID_SME: Legacy SME message queue
609 * 5) QDF_MODULE_ID_OS_IF: OS IF message queue for new components
610 */
611 status = scheduler_register_module(QDF_MODULE_ID_SYS,
612 &scheduler_timer_q_mq_handler);
613 status = scheduler_register_module(QDF_MODULE_ID_TARGET_IF,
614 &scheduler_target_if_mq_handler);
615 status = scheduler_register_module(QDF_MODULE_ID_PE,
616 &pe_mc_process_handler);
617 status = scheduler_register_module(QDF_MODULE_ID_SME,
618 &sme_mc_process_handler);
619 status = scheduler_register_module(QDF_MODULE_ID_OS_IF,
620 &scheduler_os_if_mq_handler);
621 status = scheduler_register_module(QDF_MODULE_ID_SCAN,
622 &scheduler_scan_mq_handler);
623 return status;
624 }
625
626 static QDF_STATUS cds_deregister_all_modules(void)
627 {
628 QDF_STATUS status;
629
630 scheduler_deregister_wma_legacy_handler();
631 scheduler_deregister_sys_legacy_handler();
632 status = scheduler_deregister_module(QDF_MODULE_ID_SCAN);
633 status = scheduler_deregister_module(QDF_MODULE_ID_SYS);
634 status = scheduler_deregister_module(QDF_MODULE_ID_TARGET_IF);
635 status = scheduler_deregister_module(QDF_MODULE_ID_PE);
636 status = scheduler_deregister_module(QDF_MODULE_ID_SME);
637 status = scheduler_deregister_module(QDF_MODULE_ID_OS_IF);
638
639 return status;
640 }
641
642 /**
643 * cds_set_ac_specs_params() - set ac_specs params in cds_config_info
644 * @cds_cfg: Pointer to cds_config_info
645 *
646 * Return: none
647 */
648 static void
649 cds_set_ac_specs_params(struct cds_config_info *cds_cfg)
650 {
651 int i;
652 struct cds_context *cds_ctx;
653
654 if (!cds_cfg)
655 return;
656
657 cds_ctx = cds_get_context(QDF_MODULE_ID_QDF);
658 if (!cds_ctx)
659 return;
660
661 for (i = 0; i < QCA_WLAN_AC_ALL; i++) {
662 cds_cfg->ac_specs[i] = cds_ctx->ac_specs[i];
663 }
664 }
665
666 static int cds_hang_event_notifier_call(struct notifier_block *block,
667 unsigned long state,
668 void *data)
669 {
670 struct qdf_notifer_data *cds_hang_data = data;
671 uint32_t total_len;
672 struct cds_hang_event_fixed_param *cmd;
673 uint8_t *cds_hang_evt_buff;
674
675 if (!cds_hang_data)
676 return NOTIFY_STOP_MASK;
677
678 cds_hang_evt_buff = cds_hang_data->hang_data;
679
680 if (!cds_hang_evt_buff)
681 return NOTIFY_STOP_MASK;
682
683 total_len = sizeof(*cmd);
684 if (cds_hang_data->offset + total_len > QDF_WLAN_HANG_FW_OFFSET)
685 return NOTIFY_STOP_MASK;
686
687 cds_hang_evt_buff = cds_hang_data->hang_data + cds_hang_data->offset;
688 cmd = (struct cds_hang_event_fixed_param *)cds_hang_evt_buff;
689 QDF_HANG_EVT_SET_HDR(&cmd->tlv_header, HANG_EVT_TAG_CDS,
690 QDF_HANG_GET_STRUCT_TLVLEN(*cmd));
691
692 cmd->recovery_reason = gp_cds_context->recovery_reason;
693
694 /* userspace expects a fixed format */
695 qdf_mem_set(&cmd->driver_version, DRIVER_VER_LEN, ' ');
696 qdf_mem_copy(&cmd->driver_version, QWLAN_VERSIONSTR,
697 qdf_min(sizeof(QWLAN_VERSIONSTR) - 1,
698 (size_t)DRIVER_VER_LEN));
699
700 /* userspace expects a fixed format */
701 qdf_mem_set(&cmd->hang_event_version, HANG_EVENT_VER_LEN, ' ');
702 qdf_mem_copy(&cmd->hang_event_version, QDF_HANG_EVENT_VERSION,
703 qdf_min(sizeof(QDF_HANG_EVENT_VERSION) - 1,
704 (size_t)HANG_EVENT_VER_LEN));
705
706 cds_hang_data->offset += total_len;
707 return NOTIFY_OK;
708 }
709
710 static qdf_notif_block cds_hang_event_notifier = {
711 .notif_block.notifier_call = cds_hang_event_notifier_call,
712 };
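
/*
 * Registration sketch: cds_open() below hooks this block into the QDF hang
 * event chain and cds_close() removes it again, so cds_hang_event_notifier_call()
 * runs whenever QDF collects hang data:
 *
 *	qdf_hang_event_register_notifier(&cds_hang_event_notifier);
 *	...
 *	qdf_hang_event_unregister_notifier(&cds_hang_event_notifier);
 */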
713
714 /**
715 * cds_set_exclude_selftx_from_cca_busy_time() - Set exclude self tx time
716 * from cca busy time bool in cds config
717 * @exclude_selftx_from_cca_busy: Bool to be stored in cds config
718 * @cds_cfg: Pointer to cds config
719 *
720 * Return: None
721 */
722 static void
723 cds_set_exclude_selftx_from_cca_busy_time(bool exclude_selftx_from_cca_busy,
724 struct cds_config_info *cds_cfg)
725 {
726 cds_cfg->exclude_selftx_from_cca_busy = exclude_selftx_from_cca_busy;
727 }
728
729 /**
730 * cds_open() - open the CDS Module
731 *
732  * cds_open() opens the CDS scheduler.
733  * Upon successful initialization:
734  * - All CDS submodules have been initialized
735  *
736  * - The CDS scheduler has been opened
737  *
738  * - All the WLAN SW components have been opened. This includes
739  *   SYS, MAC, SME, WMA and TL.
740  * @psoc: psoc handle
741 *
742 * Return: QDF status
743 */
744 QDF_STATUS cds_open(struct wlan_objmgr_psoc *psoc)
745 {
746 QDF_STATUS status;
747 struct cds_config_info *cds_cfg;
748 qdf_device_t qdf_ctx;
749 struct htc_init_info htcInfo = { 0 };
750 struct dp_txrx_soc_attach_params soc_attach_params = {0};
751 struct ol_context *ol_ctx;
752 struct hif_opaque_softc *scn;
753 void *HTCHandle;
754 struct hdd_context *hdd_ctx;
755 struct cds_context *cds_ctx;
756 mac_handle_t mac_handle;
757
758 cds_debug("Opening CDS");
759
760 cds_ctx = cds_get_context(QDF_MODULE_ID_QDF);
761 if (!cds_ctx)
762 return QDF_STATUS_E_FAILURE;
763
764 /* Initialize the timer module */
765 qdf_timer_module_init();
766
767 /* Initialize bug reporting structure */
768 cds_init_log_completion();
769
770 hdd_ctx = gp_cds_context->hdd_context;
771 if (!hdd_ctx || !hdd_ctx->config) {
772 cds_err("Hdd Context is Null");
773
774 status = QDF_STATUS_E_FAILURE;
775 return status;
776 }
777
778 status = dispatcher_enable();
779 if (QDF_IS_STATUS_ERROR(status)) {
780 cds_err("Failed to enable dispatcher; status:%d", status);
781 return status;
782 }
783
784 /* Now Open the CDS Scheduler */
785 status = cds_sched_open(gp_cds_context,
786 &gp_cds_context->qdf_sched,
787 sizeof(cds_sched_context));
788 if (QDF_IS_STATUS_ERROR(status)) {
789 cds_alert("Failed to open CDS Scheduler");
790 goto err_dispatcher_disable;
791 }
792
793 scn = cds_get_context(QDF_MODULE_ID_HIF);
794 if (!scn) {
795 status = QDF_STATUS_E_FAILURE;
796 goto err_sched_close;
797 }
798
799 cds_cfg = cds_get_ini_config();
800 if (!cds_cfg) {
801 cds_err("Cds config is NULL");
802
803 status = QDF_STATUS_E_FAILURE;
804 goto err_sched_close;
805 }
806
807 hdd_enable_fastpath(hdd_ctx, scn);
808
809 /* Initialize BMI and Download firmware */
810 ol_ctx = cds_get_context(QDF_MODULE_ID_BMI);
811 status = bmi_download_firmware(ol_ctx);
812 if (QDF_IS_STATUS_ERROR(status)) {
813 		cds_alert("BMI firmware download failed; status:%d", status);
814 goto err_bmi_close;
815 }
816
817 hdd_wlan_update_target_info(hdd_ctx, scn);
818
819 htcInfo.pContext = ol_ctx;
820 htcInfo.TargetFailure = ol_target_failure;
821 htcInfo.TargetSendSuspendComplete =
822 ucfg_pmo_psoc_target_suspend_acknowledge;
823 htcInfo.target_initial_wakeup_cb = ucfg_pmo_psoc_handle_initial_wake_up;
824 htcInfo.target_psoc = (void *)psoc;
825 htcInfo.cfg_wmi_credit_cnt = hdd_ctx->config->cfg_wmi_credit_cnt;
826 qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
827
828 /* Create HTC */
829 gp_cds_context->htc_ctx =
830 htc_create(scn, &htcInfo, qdf_ctx, cds_get_conparam());
831 if (!gp_cds_context->htc_ctx) {
832 cds_alert("Failed to Create HTC");
833
834 status = QDF_STATUS_E_FAILURE;
835 goto err_bmi_close;
836 }
837 ucfg_pmo_psoc_update_htc_handle(psoc, (void *)gp_cds_context->htc_ctx);
838
839 status = bmi_done(ol_ctx);
840 if (QDF_IS_STATUS_ERROR(status)) {
841 cds_alert("Failed to complete BMI phase");
842 goto err_htc_close;
843 }
844
845 cds_set_exclude_selftx_from_cca_busy_time(
846 hdd_ctx->config->exclude_selftx_from_cca_busy,
847 cds_cfg);
848 /*Open the WMA module */
849 status = wma_open(psoc, hdd_update_tgt_cfg, cds_cfg,
850 hdd_ctx->target_type);
851 if (QDF_IS_STATUS_ERROR(status)) {
852 cds_alert("Failed to open WMA module");
853 goto err_htc_close;
854 }
855
856 	/* Number of peers limit differs in each chip version. If the peer
857 	 * limit configured in the INI exceeds what the target supports, WMA
858 	 * adjusts it and keeps the correct limit in cds_cfg->max_station, so
859 	 * hdd_ctx->config->maxNumberOfPeers ends up with the adjusted value.
860 	 */
861 	/* In FTM mode cds_cfg->max_station is zero. Propagating that into the
862 	 * hdd context config entry causes pe_open() to fail if con_mode later
863 	 * changes from FTM mode to any other mode.
864 	 */
865 if (QDF_DRIVER_TYPE_PRODUCTION == cds_cfg->driver_type)
866 ucfg_mlme_set_sap_max_peers(psoc, cds_cfg->max_station);
867
868 HTCHandle = cds_get_context(QDF_MODULE_ID_HTC);
869 gp_cds_context->cfg_ctx = NULL;
870 if (!HTCHandle) {
871 status = QDF_STATUS_E_FAILURE;
872 goto err_wma_close;
873 }
874
875 status = htc_wait_target(HTCHandle);
876 if (QDF_IS_STATUS_ERROR(status)) {
877 		cds_alert("Failed htc_wait_target; status: %d", status);
878 QDF_BUG(status == QDF_STATUS_E_NOMEM || cds_is_fw_down());
879
880 goto err_wma_close;
881 }
882
883 cds_debug("target_type %d 8074:%d 6290:%d 6390: %d 6490: %d 6750: %d",
884 hdd_ctx->target_type,
885 TARGET_TYPE_QCA8074,
886 TARGET_TYPE_QCA6290,
887 TARGET_TYPE_QCA6390,
888 TARGET_TYPE_QCA6490,
889 TARGET_TYPE_QCA6750);
890
891 /* Set default value to false */
892 hdd_ctx->is_wifi3_0_target = false;
893
894 soc_attach_params.target_type = hdd_ctx->target_type;
895 soc_attach_params.target_psoc = htcInfo.target_psoc;
896 soc_attach_params.dp_ol_if_ops = &dp_ol_if_ops;
897 gp_cds_context->dp_soc =
898 ucfg_dp_txrx_soc_attach(&soc_attach_params,
899 &hdd_ctx->is_wifi3_0_target);
900 if (!gp_cds_context->dp_soc) {
901 status = QDF_STATUS_E_FAILURE;
902 goto err_wma_close;
903 }
904
905 wlan_psoc_set_dp_handle(psoc, gp_cds_context->dp_soc);
906 ucfg_dp_set_cmn_dp_handle(psoc, gp_cds_context->dp_soc);
907 ucfg_pmo_psoc_update_dp_handle(psoc, gp_cds_context->dp_soc);
908 ucfg_ocb_update_dp_handle(psoc, gp_cds_context->dp_soc);
909
910 cds_set_ac_specs_params(cds_cfg);
911 cds_cfg_update_ac_specs_params((struct txrx_pdev_cfg_param_t *)
912 gp_cds_context->cfg_ctx, cds_cfg);
913 cds_cdp_cfg_attach(psoc);
914
915 bmi_target_ready(scn, gp_cds_context->cfg_ctx);
916
917 /* Now proceed to open the MAC */
918 status = mac_open(psoc, &mac_handle,
919 gp_cds_context->hdd_context, cds_cfg);
920
921 if (QDF_STATUS_SUCCESS != status) {
922 cds_alert("Failed to open MAC");
923 goto err_soc_detach;
924 }
925 gp_cds_context->mac_context = mac_handle;
926
927 /* Now proceed to open the SME */
928 status = sme_open(mac_handle);
929 if (QDF_IS_STATUS_ERROR(status)) {
930 cds_alert("Failed to open SME");
931 goto err_mac_close;
932 }
933
934 cds_register_all_modules();
935
936 status = dispatcher_psoc_open(psoc);
937 if (QDF_IS_STATUS_ERROR(status)) {
938 cds_alert("Failed to open PSOC Components");
939 goto deregister_modules;
940 }
941
942 ucfg_mc_cp_stats_register_pmo_handler();
943 qdf_hang_event_register_notifier(&cds_hang_event_notifier);
944
945 return QDF_STATUS_SUCCESS;
946
947 deregister_modules:
948 cds_deregister_all_modules();
949 sme_close(mac_handle);
950
951 err_mac_close:
952 mac_close(mac_handle);
953 gp_cds_context->mac_context = NULL;
954
955 err_soc_detach:
956 ucfg_dp_txrx_soc_detach(gp_cds_context->dp_soc);
957 gp_cds_context->dp_soc = NULL;
958
959 ucfg_ocb_update_dp_handle(psoc, NULL);
960 ucfg_pmo_psoc_update_dp_handle(psoc, NULL);
961 wlan_psoc_set_dp_handle(psoc, NULL);
962
963 err_wma_close:
964 cds_shutdown_notifier_purge();
965 wma_close();
966 wma_wmi_service_close();
967
968 err_htc_close:
969 if (gp_cds_context->htc_ctx) {
970 htc_destroy(gp_cds_context->htc_ctx);
971 gp_cds_context->htc_ctx = NULL;
972 ucfg_pmo_psoc_update_htc_handle(psoc, NULL);
973 }
974
975 err_bmi_close:
976 bmi_cleanup(ol_ctx);
977
978 err_sched_close:
979 if (QDF_IS_STATUS_ERROR(cds_sched_close()))
980 QDF_DEBUG_PANIC("Failed to close CDS Scheduler");
981
982 err_dispatcher_disable:
983 if (QDF_IS_STATUS_ERROR(dispatcher_disable()))
984 QDF_DEBUG_PANIC("Failed to disable dispatcher");
985
986 return status;
987 } /* cds_open() */
988
989 QDF_STATUS cds_dp_open(struct wlan_objmgr_psoc *psoc)
990 {
991 QDF_STATUS qdf_status;
992 struct dp_txrx_config dp_config;
993 struct hdd_context *hdd_ctx;
994
995 hdd_ctx = gp_cds_context->hdd_context;
996 if (!hdd_ctx) {
997 cds_err("HDD context is null");
998 return QDF_STATUS_E_FAILURE;
999 }
1000
1001 qdf_status =
1002 ucfg_dp_txrx_pdev_attach(cds_get_context(QDF_MODULE_ID_SOC));
1003 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1004 /* Critical Error ... Cannot proceed further */
1005 cds_alert("Failed to open TXRX");
1006 QDF_ASSERT(0);
1007 goto close;
1008 }
1009
1010 if (hdd_ctx->target_type == TARGET_TYPE_QCA6290 ||
1011 hdd_ctx->target_type == TARGET_TYPE_QCA6390 ||
1012 hdd_ctx->target_type == TARGET_TYPE_QCA6490 ||
1013 hdd_ctx->target_type == TARGET_TYPE_QCA6750 ||
1014 hdd_ctx->target_type == TARGET_TYPE_KIWI ||
1015 hdd_ctx->target_type == TARGET_TYPE_MANGO ||
1016 hdd_ctx->target_type == TARGET_TYPE_PEACH ||
1017 hdd_ctx->target_type == TARGET_TYPE_WCN6450) {
1018 qdf_status = cdp_pdev_init(cds_get_context(QDF_MODULE_ID_SOC),
1019 gp_cds_context->htc_ctx,
1020 gp_cds_context->qdf_ctx, 0);
1021 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1022 /* Critical Error ... Cannot proceed further */
1023 cds_alert("Failed to init TXRX");
1024 QDF_ASSERT(0);
1025 goto pdev_detach;
1026 }
1027 }
1028
1029 if (cdp_txrx_intr_attach(gp_cds_context->dp_soc)
1030 != QDF_STATUS_SUCCESS) {
1031 cds_alert("Failed to attach interrupts");
1032 goto pdev_deinit;
1033 }
1034
1035 dp_config.enable_rx_threads =
1036 (cds_get_conparam() == QDF_GLOBAL_MONITOR_MODE) ?
1037 false : gp_cds_context->cds_cfg->enable_dp_rx_threads;
1038
1039 qdf_status = ucfg_dp_txrx_init(cds_get_context(QDF_MODULE_ID_SOC),
1040 OL_TXRX_PDEV_ID,
1041 &dp_config);
1042
1043 if (!QDF_IS_STATUS_SUCCESS(qdf_status))
1044 goto intr_close;
1045
1046 ucfg_pmo_psoc_set_txrx_pdev_id(psoc, OL_TXRX_PDEV_ID);
1047 ucfg_ocb_set_txrx_pdev_id(psoc, OL_TXRX_PDEV_ID);
1048
1049 cdp_set_rtpm_tput_policy_requirement(cds_get_context(QDF_MODULE_ID_SOC),
1050 false);
1051
1052 cds_debug("CDS successfully Opened");
1053
1054 if (cdp_cfg_get(gp_cds_context->dp_soc, cfg_dp_tc_based_dyn_gro_enable))
1055 ucfg_dp_set_tc_based_dyn_gro(psoc, true);
1056 else
1057 ucfg_dp_set_tc_based_dyn_gro(psoc, false);
1058
1059 ucfg_dp_set_tc_ingress_prio(psoc, cdp_cfg_get(gp_cds_context->dp_soc,
1060 cfg_dp_tc_ingress_prio));
1061
1062 	return QDF_STATUS_SUCCESS;
1063
1064 intr_close:
1065 cdp_txrx_intr_detach(gp_cds_context->dp_soc);
1066
1067 pdev_deinit:
1068 cdp_pdev_deinit(gp_cds_context->dp_soc,
1069 OL_TXRX_PDEV_ID, false);
1070
1071 pdev_detach:
1072 ucfg_dp_txrx_pdev_detach(gp_cds_context->dp_soc, OL_TXRX_PDEV_ID,
1073 false);
1074
1075 close:
1076 return QDF_STATUS_E_FAILURE;
1077 }
1078
1079 /**
1080 * cds_should_suspend_target() - Get value whether target can suspend
1081 *
1082 * Return: true if target can suspend, otherwise false
1083 */
1084 static bool cds_should_suspend_target(void)
1085 {
1086 struct hif_opaque_softc *hif_ctx;
1087 struct hif_target_info *tgt_info;
1088 uint32_t target_type = TARGET_TYPE_UNKNOWN;
1089
1090 /* don't suspend during SSR */
1091 if (cds_is_driver_recovering())
1092 return false;
1093
1094 /* don't suspend if the driver is in a bad state */
1095 if (cds_is_driver_in_bad_state())
1096 return false;
1097
1098 /* if we are in any mode other than FTM we should suspend */
1099 if (cds_get_conparam() != QDF_GLOBAL_FTM_MODE)
1100 return true;
1101
1102 hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1103 if (hif_ctx) {
1104 tgt_info = hif_get_target_info_handle(hif_ctx);
1105 if (tgt_info)
1106 target_type = tgt_info->target_type;
1107 }
1108
1109 /*
1110 	 * for most targets we also want to suspend in FTM mode,
1111 * but some targets do not support that.
1112 */
1113 if (target_type == TARGET_TYPE_AR6320 ||
1114 target_type == TARGET_TYPE_AR6320V1 ||
1115 target_type == TARGET_TYPE_AR6320V2 ||
1116 target_type == TARGET_TYPE_AR6320V3 ||
1117 target_type == TARGET_TYPE_QCN7605)
1118 return false;
1119
1120 /* target should support suspend in FTM mode */
1121 return true;
1122 }
1123
1124 #ifdef HIF_USB
1125 static inline void cds_suspend_target(tp_wma_handle wma_handle)
1126 {
1127 QDF_STATUS status;
1128 /* Suspend the target and disable interrupt */
1129 status = ucfg_pmo_psoc_suspend_target(wma_handle->psoc, 0);
1130 if (status)
1131 cds_err("Failed to suspend target, status = %d", status);
1132 }
1133 #else
1134 static inline void cds_suspend_target(tp_wma_handle wma_handle)
1135 {
1136 QDF_STATUS status;
1137 /* Suspend the target and disable interrupt */
1138 status = ucfg_pmo_psoc_suspend_target(wma_handle->psoc, 1);
1139 if (status)
1140 cds_err("Failed to suspend target, status = %d", status);
1141 }
1142 #endif /* HIF_USB */
1143
1144 /**
1145 * cds_pre_enable() - pre enable cds
1146 *
1147 * Return: QDF status
1148 */
1149 QDF_STATUS cds_pre_enable(void)
1150 {
1151 QDF_STATUS status;
1152 int errno;
1153 void *scn;
1154 void *soc;
1155 void *hif_ctx;
1156
1157 cds_enter();
1158
1159 if (!gp_cds_context) {
1160 cds_err("cds context is null");
1161 return QDF_STATUS_E_INVAL;
1162 }
1163
1164 if (!gp_cds_context->wma_context) {
1165 cds_err("wma context is null");
1166 return QDF_STATUS_E_INVAL;
1167 }
1168
1169 scn = cds_get_context(QDF_MODULE_ID_HIF);
1170 if (!scn)
1171 return QDF_STATUS_E_INVAL;
1172
1173 soc = cds_get_context(QDF_MODULE_ID_SOC);
1174 if (!soc)
1175 return QDF_STATUS_E_INVAL;
1176
1177 /* call Packetlog connect service */
1178 if (QDF_GLOBAL_FTM_MODE != cds_get_conparam() &&
1179 QDF_GLOBAL_EPPING_MODE != cds_get_conparam())
1180 cdp_pkt_log_con_service(soc, OL_TXRX_PDEV_ID,
1181 scn);
1182
1183 /*call WMA pre start */
1184 status = wma_pre_start();
1185 if (QDF_IS_STATUS_ERROR(status)) {
1186 		cds_err("Failed WMA pre-start");
1187 goto exit_pkt_log;
1188 }
1189
1190 status = htc_start(gp_cds_context->htc_ctx);
1191 if (QDF_IS_STATUS_ERROR(status)) {
1192 cds_err("Failed to Start HTC");
1193 goto exit_pkt_log;
1194 }
1195
1196 status = wma_wait_for_ready_event(gp_cds_context->wma_context);
1197 if (QDF_IS_STATUS_ERROR(status)) {
1198 cds_err("Failed to wait for ready event; status: %u", status);
1199 goto stop_wmi;
1200 }
1201
1202 errno = cdp_pdev_post_attach(soc, OL_TXRX_PDEV_ID);
1203 if (errno) {
1204 cds_err("Failed to attach pdev");
1205 status = qdf_status_from_os_return(errno);
1206 goto stop_wmi;
1207 }
1208
1209 return QDF_STATUS_SUCCESS;
1210
1211 stop_wmi:
1212 /* Send pdev suspend to fw otherwise FW is not aware that
1213 * host is freeing resources.
1214 */
1215 if (!(cds_is_driver_recovering() || cds_is_driver_in_bad_state()))
1216 cds_suspend_target(gp_cds_context->wma_context);
1217
1218 hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1219
1220 wma_wmi_stop();
1221
1222 if (hif_ctx) {
1223 cds_err("Disable the isr & reset the soc!");
1224 hif_disable_isr(hif_ctx);
1225 hif_reset_soc(hif_ctx);
1226 }
1227 htc_stop(gp_cds_context->htc_ctx);
1228
1229 wma_wmi_work_close();
1230
1231 exit_pkt_log:
1232 if (QDF_GLOBAL_FTM_MODE != cds_get_conparam() &&
1233 QDF_GLOBAL_EPPING_MODE != cds_get_conparam())
1234 cdp_pkt_log_exit(soc, OL_TXRX_PDEV_ID);
1235
1236 return status;
1237 }
1238
1239 QDF_STATUS cds_enable(struct wlan_objmgr_psoc *psoc)
1240 {
1241 QDF_STATUS qdf_status;
1242 struct mac_start_params mac_params;
1243
1244 /* We support only one instance for now ... */
1245 if (!gp_cds_context) {
1246 cds_err("Invalid CDS context");
1247 return QDF_STATUS_E_FAILURE;
1248 }
1249
1250 if (!gp_cds_context->wma_context) {
1251 cds_err("WMA NULL context");
1252 return QDF_STATUS_E_FAILURE;
1253 }
1254
1255 if (!gp_cds_context->mac_context) {
1256 cds_err("MAC NULL context");
1257 return QDF_STATUS_E_FAILURE;
1258 }
1259
1260 /* Start the wma */
1261 qdf_status = wma_start();
1262 if (qdf_status != QDF_STATUS_SUCCESS) {
1263 cds_err("Failed to start wma; status:%d", qdf_status);
1264 return QDF_STATUS_E_FAILURE;
1265 }
1266
1267 /* Start the MAC */
1268 qdf_mem_zero(&mac_params, sizeof(mac_params));
1269 mac_params.driver_type = QDF_DRIVER_TYPE_PRODUCTION;
1270 qdf_status = mac_start(gp_cds_context->mac_context, &mac_params);
1271
1272 if (QDF_STATUS_SUCCESS != qdf_status) {
1273 cds_err("Failed to start MAC; status:%d", qdf_status);
1274 goto err_wma_stop;
1275 }
1276
1277 /* START SME */
1278 qdf_status = sme_start(gp_cds_context->mac_context);
1279 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1280 cds_err("Failed to start SME; status:%d", qdf_status);
1281 goto err_mac_stop;
1282 }
1283
1284 qdf_status =
1285 ucfg_dp_txrx_attach_target(cds_get_context(QDF_MODULE_ID_SOC),
1286 OL_TXRX_PDEV_ID);
1287 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1288 cds_err("Failed to attach DP target; status:%d", qdf_status);
1289 goto err_sme_stop;
1290 }
1291
1292 qdf_status = dispatcher_psoc_enable(psoc);
1293 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1294 cds_err("dispatcher_psoc_enable failed; status:%d", qdf_status);
1295 goto err_soc_target_detach;
1296 }
1297
1298 /* Trigger psoc enable for CLD components */
1299 hdd_component_psoc_enable(psoc);
1300
1301 return QDF_STATUS_SUCCESS;
1302
1303 err_soc_target_detach:
1304 /* NOOP */
1305
1306 err_sme_stop:
1307 sme_stop(gp_cds_context->mac_context);
1308
1309 err_mac_stop:
1310 mac_stop(gp_cds_context->mac_context);
1311
1312 err_wma_stop:
1313 qdf_status = wma_stop();
1314 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1315 cds_err("Failed to stop wma");
1316 QDF_ASSERT(QDF_IS_STATUS_SUCCESS(qdf_status));
1317 }
1318
1319 return QDF_STATUS_E_FAILURE;
1320 } /* cds_enable() */
1321
1322 /**
1323 * cds_disable() - stop/disable cds module
1324 * @psoc: Psoc pointer
1325 *
1326 * Return: QDF status
1327 */
1328 QDF_STATUS cds_disable(struct wlan_objmgr_psoc *psoc)
1329 {
1330 QDF_STATUS qdf_status;
1331 void *handle;
1332
1333 	/* PSOC disable for all new components. It needs to happen before the
1334 	 * target PDEV is suspended so that a component can abort all of its
1335 	 * ongoing transactions with FW. Always keep it before wma_stop() as
1336 	 * wma_stop() does the target PDEV suspend.
1337 	 */
1338
1339 /* Trigger psoc disable for CLD components */
1340 if (psoc) {
1341 hdd_component_psoc_disable(psoc);
1342 dispatcher_psoc_disable(psoc);
1343 }
1344
1345 qdf_status = wma_stop();
1346
1347 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1348 cds_err("Failed to stop wma");
1349 QDF_ASSERT(QDF_IS_STATUS_SUCCESS(qdf_status));
1350 }
1351
1352 handle = cds_get_context(QDF_MODULE_ID_PE);
1353 if (!handle)
1354 return QDF_STATUS_E_INVAL;
1355
1356 umac_stop();
1357
1358 return qdf_status;
1359 }
1360
1361 /**
1362 * cds_post_disable() - post disable cds module
1363 *
1364 * Return: QDF status
1365 */
1366 QDF_STATUS cds_post_disable(void)
1367 {
1368 tp_wma_handle wma_handle;
1369 struct hif_opaque_softc *hif_ctx;
1370 struct scheduler_ctx *sched_ctx;
1371 QDF_STATUS qdf_status;
1372
1373 wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
1374 if (!wma_handle)
1375 return QDF_STATUS_E_INVAL;
1376
1377 hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
1378 if (!hif_ctx)
1379 return QDF_STATUS_E_INVAL;
1380
1381 /* flush any unprocessed scheduler messages */
1382 sched_ctx = scheduler_get_context();
1383 if (sched_ctx) {
1384 qdf_status = scheduler_disable();
1385 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1386 cds_err("Failed to disable scheduler");
1387 return QDF_STATUS_E_INVAL;
1388 }
1389 }
1390 	/*
1391 	 * With the new state machine changes, cds_close can be invoked without
1392 	 * cds_disable. Send the following cleanup prerequisites to firmware so
1393 	 * that FW and host stay in sync for the cleanup indication:
1394 	 * - Send PDEV_SUSPEND indication to firmware
1395 	 * - Disable HIF interrupts.
1396 	 * - Clean up CE tasklets.
1397 	 */
1398
1399 cds_debug("send deinit sequence to firmware");
1400 if (cds_should_suspend_target())
1401 cds_suspend_target(wma_handle);
1402 hif_disable_isr(hif_ctx);
1403 hif_reset_soc(hif_ctx);
1404
1405 if (gp_cds_context->htc_ctx) {
1406 wma_wmi_stop();
1407 htc_stop(gp_cds_context->htc_ctx);
1408 }
1409
1410 qdf_status = cds_close_rx_thread();
1411 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1412 cds_err("Failed to close RX thread!");
1413 return QDF_STATUS_E_INVAL;
1414 }
1415
1416 cdp_pdev_pre_detach(cds_get_context(QDF_MODULE_ID_SOC),
1417 OL_TXRX_PDEV_ID, 1);
1418
1419 return QDF_STATUS_SUCCESS;
1420 }
1421
1422 /**
1423 * cds_close() - close cds module
1424 * @psoc: Psoc pointer
1425 *
1426 * This API allows user to close modules registered
1427 * with connectivity device services.
1428 *
1429 * Return: QDF status
1430 */
1431 QDF_STATUS cds_close(struct wlan_objmgr_psoc *psoc)
1432 {
1433 QDF_STATUS qdf_status;
1434
1435 qdf_hang_event_unregister_notifier(&cds_hang_event_notifier);
1436 qdf_status = cds_sched_close();
1437 QDF_ASSERT(QDF_IS_STATUS_SUCCESS(qdf_status));
1438 if (QDF_IS_STATUS_ERROR(qdf_status))
1439 cds_err("Failed to close CDS Scheduler");
1440
1441 dispatcher_psoc_close(psoc);
1442
1443 qdf_flush_work(&gp_cds_context->cds_recovery_work);
1444
1445 cds_shutdown_notifier_purge();
1446
1447 qdf_status = wma_wmi_work_close();
1448 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1449 cds_err("Failed to close wma_wmi_work");
1450 QDF_ASSERT(0);
1451 }
1452
1453 if (gp_cds_context->htc_ctx) {
1454 htc_destroy(gp_cds_context->htc_ctx);
1455 ucfg_pmo_psoc_update_htc_handle(psoc, NULL);
1456 gp_cds_context->htc_ctx = NULL;
1457 }
1458
1459 qdf_status = sme_close(gp_cds_context->mac_context);
1460 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1461 cds_err("Failed to close SME");
1462 QDF_ASSERT(QDF_IS_STATUS_SUCCESS(qdf_status));
1463 }
1464
1465 qdf_status = mac_close(gp_cds_context->mac_context);
1466 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1467 cds_err("Failed to close MAC");
1468 QDF_ASSERT(QDF_IS_STATUS_SUCCESS(qdf_status));
1469 }
1470
1471 gp_cds_context->mac_context = NULL;
1472
1473 ucfg_dp_txrx_soc_detach(gp_cds_context->dp_soc);
1474 gp_cds_context->dp_soc = NULL;
1475
1476 ucfg_pmo_psoc_update_dp_handle(psoc, NULL);
1477 wlan_psoc_set_dp_handle(psoc, NULL);
1478
1479
1480 qdf_status = wma_close();
1481 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1482 cds_err("Failed to close wma");
1483 QDF_ASSERT(QDF_IS_STATUS_SUCCESS(qdf_status));
1484 }
1485
1486 qdf_status = wma_wmi_service_close();
1487 if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
1488 cds_err("Failed to close wma_wmi_service");
1489 QDF_ASSERT(QDF_IS_STATUS_SUCCESS(qdf_status));
1490 }
1491
1492 cds_deinit_ini_config();
1493 qdf_timer_module_deinit();
1494
1495 cds_deregister_all_modules();
1496
1497 return QDF_STATUS_SUCCESS;
1498 }
1499
1500 QDF_STATUS cds_dp_close(struct wlan_objmgr_psoc *psoc)
1501 {
1502 cdp_txrx_intr_detach(gp_cds_context->dp_soc);
1503
1504 qdf_nbuf_stop_replenish_timer();
1505
1506 ucfg_dp_txrx_deinit(cds_get_context(QDF_MODULE_ID_SOC));
1507
1508 cdp_pdev_deinit(cds_get_context(QDF_MODULE_ID_SOC), OL_TXRX_PDEV_ID, 1);
1509
1510 ucfg_dp_txrx_pdev_detach(cds_get_context(QDF_MODULE_ID_SOC),
1511 OL_TXRX_PDEV_ID, 1);
1512
1513 ucfg_pmo_psoc_set_txrx_pdev_id(psoc, OL_TXRX_INVALID_PDEV_ID);
1514
1515 return QDF_STATUS_SUCCESS;
1516 }
1517
1518 void *__cds_get_context(QDF_MODULE_ID module_id, const char *func)
1519 {
1520 void *context = NULL;
1521
1522 if (!gp_cds_context) {
1523 cds_err("cds context pointer is null (via %s)", func);
1524 return NULL;
1525 }
1526
1527 switch (module_id) {
1528 case QDF_MODULE_ID_HDD:
1529 {
1530 context = gp_cds_context->hdd_context;
1531 break;
1532 }
1533
1534 case QDF_MODULE_ID_SME:
1535 case QDF_MODULE_ID_PE:
1536 {
1537 /* In all these cases, we just return the MAC Context */
1538 context = gp_cds_context->mac_context;
1539 break;
1540 }
1541
1542 case QDF_MODULE_ID_WMA:
1543 {
1544 /* For wma module */
1545 context = gp_cds_context->wma_context;
1546 break;
1547 }
1548
1549 case QDF_MODULE_ID_QDF:
1550 {
1551 /* For SYS this is CDS itself */
1552 context = gp_cds_context;
1553 break;
1554 }
1555
1556 case QDF_MODULE_ID_HIF:
1557 {
1558 context = gp_cds_context->hif_context;
1559 break;
1560 }
1561
1562 case QDF_MODULE_ID_HTC:
1563 {
1564 context = gp_cds_context->htc_ctx;
1565 break;
1566 }
1567
1568 case QDF_MODULE_ID_QDF_DEVICE:
1569 {
1570 context = gp_cds_context->qdf_ctx;
1571 break;
1572 }
1573
1574 case QDF_MODULE_ID_BMI:
1575 {
1576 context = gp_cds_context->g_ol_context;
1577 break;
1578 }
1579
1580 case QDF_MODULE_ID_CFG:
1581 {
1582 context = gp_cds_context->cfg_ctx;
1583 break;
1584 }
1585
1586 case QDF_MODULE_ID_SOC:
1587 {
1588 context = gp_cds_context->dp_soc;
1589 break;
1590 }
1591
1592 default:
1593 {
1594 cds_err("Module ID %d does not have its context maintained by CDS (via %s)",
1595 module_id, func);
1596 QDF_ASSERT(0);
1597 return NULL;
1598 }
1599 }
1600
1601 if (!context)
1602 cds_err("Module ID %d context is Null (via %s)",
1603 module_id, func);
1604
1605 return context;
1606 } /* cds_get_context() */
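
/*
 * Usage sketch: callers normally go through the cds_get_context() wrapper,
 * which is assumed to pass __func__ for the error prints above, and must
 * handle a NULL return, for example:
 *
 *	struct hif_opaque_softc *hif_ctx;
 *
 *	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
 *	if (!hif_ctx)
 *		return QDF_STATUS_E_INVAL;
 */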
1607
1608 /**
1609 * cds_get_global_context() - get CDS global Context
1610 *
1611 * This API allows any user to get the CDS Global Context pointer from a
1612 * module context data area.
1613 *
1614 * Return: pointer to the CDS global context, NULL if the function is
1615 * unable to retrieve the CDS context.
1616 */
1617 void *cds_get_global_context(void)
1618 {
1619 if (!gp_cds_context) {
1620 /*
1621 * To avoid recursive call, this should not change to
1622 * QDF_TRACE().
1623 */
1624 pr_err("%s: global cds context is NULL", __func__);
1625 }
1626
1627 return gp_cds_context;
1628 } /* cds_get_global_context() */
1629
1630 /**
1631 * cds_get_driver_state() - Get current driver state
1632 *
1633 * This API returns current driver state stored in global context.
1634 *
1635 * Return: Driver state enum
1636 */
1637 enum cds_driver_state cds_get_driver_state(void)
1638 {
1639 if (!gp_cds_context) {
1640 cds_err("global cds context is NULL");
1641
1642 return CDS_DRIVER_STATE_UNINITIALIZED;
1643 }
1644
1645 return gp_cds_context->driver_state;
1646 }
1647
1648 /**
1649 * cds_set_driver_state() - Set current driver state
1650 * @state: Driver state to be set to.
1651 *
1652  * This API ORs @state into the current driver state. It only sets bits and
1653  * never clears them; use cds_clear_driver_state() to clear a state when
1654  * required.
1655 *
1656 * Return: None
1657 */
1658 void cds_set_driver_state(enum cds_driver_state state)
1659 {
1660 if (!gp_cds_context) {
1661 cds_err("global cds context is NULL: %x", state);
1662
1663 return;
1664 }
1665
1666 gp_cds_context->driver_state |= state;
1667 }
1668
1669 /**
1670 * cds_clear_driver_state() - Clear current driver state
1671 * @state: Driver state to be cleared.
1672 *
1673  * This API clears @state from the current driver state. It only clears bits;
1674  * use cds_set_driver_state() to set any new states.
1675 *
1676 * Return: None
1677 */
1678 void cds_clear_driver_state(enum cds_driver_state state)
1679 {
1680 if (!gp_cds_context) {
1681 cds_err("global cds context is NULL: %x", state);
1682
1683 return;
1684 }
1685
1686 gp_cds_context->driver_state &= ~state;
1687 }
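
/*
 * Usage sketch: driver state bits are ORed in and cleared individually. A
 * recovery path, for instance, might do the following (the RECOVERING bit is
 * assumed here for illustration; see cds_set_sys_rebooting() above for an
 * in-file example):
 *
 *	cds_set_driver_state(CDS_DRIVER_STATE_RECOVERING);
 *	...
 *	cds_clear_driver_state(CDS_DRIVER_STATE_RECOVERING);
 */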
1688
1689 /**
1690 * cds_alloc_context() - allocate a context within the CDS global Context
1691  * @module_id: module ID whose context area is being allocated.
1692 * @module_context: pointer to location where the pointer to the
1693 * allocated context is returned. Note this output pointer
1694 * is valid only if the API returns QDF_STATUS_SUCCESS
1695 * @size: size of the context area to be allocated.
1696 *
1697 * This API allows any user to allocate a user context area within the
1698 * CDS Global Context.
1699 *
1700 * Return: QDF status
1701 */
1702 QDF_STATUS cds_alloc_context(QDF_MODULE_ID module_id,
1703 void **module_context, uint32_t size)
1704 {
1705 void **cds_mod_context = NULL;
1706
1707 if (!gp_cds_context) {
1708 cds_err("cds context is null");
1709 return QDF_STATUS_E_FAILURE;
1710 }
1711
1712 if (!module_context) {
1713 cds_err("null param passed");
1714 return QDF_STATUS_E_FAILURE;
1715 }
1716
1717 switch (module_id) {
1718 case QDF_MODULE_ID_WMA:
1719 cds_mod_context = &gp_cds_context->wma_context;
1720 break;
1721
1722 case QDF_MODULE_ID_HIF:
1723 cds_mod_context = &gp_cds_context->hif_context;
1724 break;
1725
1726 case QDF_MODULE_ID_BMI:
1727 cds_mod_context = &gp_cds_context->g_ol_context;
1728 break;
1729
1730 default:
1731 cds_err("Module ID %i does not have its context allocated by CDS",
1732 module_id);
1733 QDF_ASSERT(0);
1734 return QDF_STATUS_E_INVAL;
1735 }
1736
1737 if (*cds_mod_context) {
1738 /* Context has already been allocated!
1739 * Prevent double allocation
1740 */
1741 cds_err("Module ID %i context has already been allocated",
1742 module_id);
1743 return QDF_STATUS_E_EXISTS;
1744 }
1745
1746 /* Dynamically allocate the context for module */
1747
1748 *module_context = qdf_mem_malloc(size);
1749
1750 if (!*module_context) {
1751 cds_err("Failed to allocate Context for module ID %i",
1752 module_id);
1753 QDF_ASSERT(0);
1754 return QDF_STATUS_E_NOMEM;
1755 }
1756
1757 *cds_mod_context = *module_context;
1758
1759 return QDF_STATUS_SUCCESS;
1760 } /* cds_alloc_context() */
1761
1762 /**
1763 * cds_set_context() - API to set context in global CDS Context
1764 * @module_id: Module ID
1765 * @context: Pointer to the Module Context
1766 *
1767 * API to set a MODULE Context in global CDS Context
1768 *
1769 * Return: QDF_STATUS
1770 */
1771 QDF_STATUS cds_set_context(QDF_MODULE_ID module_id, void *context)
1772 {
1773 struct cds_context *p_cds_context = cds_get_global_context();
1774
1775 if (!p_cds_context) {
1776 cds_err("cds context is Invalid");
1777 return QDF_STATUS_NOT_INITIALIZED;
1778 }
1779
1780 switch (module_id) {
1781 case QDF_MODULE_ID_HDD:
1782 p_cds_context->hdd_context = context;
1783 break;
1784 case QDF_MODULE_ID_HIF:
1785 p_cds_context->hif_context = context;
1786 break;
1787 default:
1788 cds_err("Module ID %i does not have its context managed by CDS",
1789 module_id);
1790 QDF_ASSERT(0);
1791 return QDF_STATUS_E_INVAL;
1792 }
1793
1794 return QDF_STATUS_SUCCESS;
1795 }
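
/*
 * Usage sketch (hypothetical caller): HDD publishes its context so that later
 * cds_get_context(QDF_MODULE_ID_HDD) calls resolve:
 *
 *	status = cds_set_context(QDF_MODULE_ID_HDD, hdd_ctx);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 */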
1796
1797 /**
1798 * cds_free_context() - free an allocated context within the
1799 * CDS global Context
1800  * @module_id: module ID whose context area is being freed
1801  * @module_context: pointer to the module context area to be freed.
1802 *
1803 * This API allows a user to free the user context area within the
1804 * CDS Global Context.
1805 *
1806 * Return: QDF status
1807 */
1808 QDF_STATUS cds_free_context(QDF_MODULE_ID module_id, void *module_context)
1809 {
1810 void **cds_mod_context = NULL;
1811
1812 if (!gp_cds_context) {
1813 cds_err("cds context is null");
1814 return QDF_STATUS_E_FAILURE;
1815 }
1816
1817 if (!module_context) {
1818 cds_err("Null param");
1819 return QDF_STATUS_E_FAILURE;
1820 }
1821
1822 switch (module_id) {
1823 case QDF_MODULE_ID_WMA:
1824 cds_mod_context = &gp_cds_context->wma_context;
1825 break;
1826
1827 case QDF_MODULE_ID_HIF:
1828 cds_mod_context = &gp_cds_context->hif_context;
1829 break;
1830
1831 case QDF_MODULE_ID_BMI:
1832 cds_mod_context = &gp_cds_context->g_ol_context;
1833 break;
1834
1835 default:
1836 cds_err("Module ID %i does not have its context allocated by CDS",
1837 module_id);
1838 QDF_ASSERT(0);
1839 return QDF_STATUS_E_INVAL;
1840 }
1841
1842 if (!*cds_mod_context) {
1843 		/* Context was never allocated, or has already been freed */
1844 		cds_err("Module ID %i context not allocated or already freed",
1845 module_id);
1846 return QDF_STATUS_E_FAILURE;
1847 }
1848
1849 if (*cds_mod_context != module_context) {
1850 cds_err("cds_mod_context != module_context");
1851 return QDF_STATUS_E_FAILURE;
1852 }
1853
1854 qdf_mem_free(module_context);
1855
1856 *cds_mod_context = NULL;
1857
1858 return QDF_STATUS_SUCCESS;
1859 } /* cds_free_context() */
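
/*
 * Usage sketch (compiled out, illustrative only): pairing cds_alloc_context()
 * with cds_free_context(). The cds_alloc_context() parameter order
 * (module_id, &context, size) is assumed from the allocator body earlier in
 * this file; the wrapper function is hypothetical.
 */
#if 0
static void cds_example_wma_context_lifecycle(void)
{
	void *wma_ctx = NULL;

	if (QDF_IS_STATUS_ERROR(cds_alloc_context(QDF_MODULE_ID_WMA, &wma_ctx,
						  sizeof(t_wma_handle))))
		return;

	/* ... module uses the context here ... */

	/* Free through CDS so the global module pointer is also cleared */
	cds_free_context(QDF_MODULE_ID_WMA, wma_ctx);
}
#endif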
1860
1861
1862 /**
1863 * cds_flush_work() - flush pending works
1864 * @work: pointer to work
1865 *
1866 * Return: none
1867 */
1868 void cds_flush_work(void *work)
1869 {
1870 cancel_work_sync(work);
1871 }
1872
1873 /**
1874 * cds_flush_delayed_work() - flush delayed works
1875 * @dwork: pointer to delayed work
1876 *
1877 * Return: none
1878 */
1879 void cds_flush_delayed_work(void *dwork)
1880 {
1881 cancel_delayed_work_sync(dwork);
1882 }
1883
1884 #ifndef REMOVE_PKT_LOG
1885 /**
1886 * cds_is_packet_log_enabled() - check if packet log is enabled
1887 *
1888 * Return: true if packet log is enabled else false
1889 */
1890 bool cds_is_packet_log_enabled(void)
1891 {
1892 struct hdd_context *hdd_ctx;
1893
1894 hdd_ctx = gp_cds_context->hdd_context;
1895 if ((!hdd_ctx) || (!hdd_ctx->config)) {
1896 cds_alert("Hdd Context is Null");
1897 return false;
1898 }
1899 return hdd_ctx->config->enable_packet_log;
1900 }
1901 #endif
1902
1903 static int cds_force_assert_target_via_pld(qdf_device_t qdf)
1904 {
1905 int errno;
1906
1907 errno = pld_force_assert_target(qdf->dev);
1908 if (errno == -EOPNOTSUPP)
1909 cds_info("PLD does not support target force assert");
1910 else if (errno)
1911 cds_err("Failed PLD target force assert; errno %d", errno);
1912 else
1913 cds_info("Target force assert triggered via PLD");
1914
1915 return errno;
1916 }
1917
1918 static QDF_STATUS cds_force_assert_target_via_wmi(qdf_device_t qdf)
1919 {
1920 QDF_STATUS status;
1921 t_wma_handle *wma;
1922
1923 wma = cds_get_context(QDF_MODULE_ID_WMA);
1924 if (!wma)
1925 return QDF_STATUS_E_INVAL;
1926
1927 status = wma_crash_inject(wma, RECOVERY_SIM_SELF_RECOVERY, 0);
1928 if (QDF_IS_STATUS_ERROR(status)) {
1929 cds_err("Failed target force assert; status %d", status);
1930 return status;
1931 }
1932
1933 status = qdf_wait_for_event_completion(&wma->recovery_event,
1934 WMA_CRASH_INJECT_TIMEOUT);
1935 if (QDF_IS_STATUS_ERROR(status)) {
1936 cds_err("Failed target force assert wait; status %d", status);
1937 return status;
1938 }
1939
1940 return QDF_STATUS_SUCCESS;
1941 }
1942
1943 /**
1944 * cds_force_assert_target() - Send assert command to firmware
1945 * @qdf: QDF device instance to assert
1946 *
1947 * An out-of-band recovery mechanism will clean up and restart the entire wlan
1948 * subsystem in the event of a firmware crash. This API injects a firmware
1949 * crash to start that process when the wlan driver is known to be in a bad
1950 * state. If the firmware assert injection fails, the wlan driver schedules
1951 * driver recovery anyway, as a best-effort attempt to return to a working
1952 * state.
1953 *
1954 * Return: QDF_STATUS
1955 */
1956 static QDF_STATUS cds_force_assert_target(qdf_device_t qdf)
1957 {
1958 int errno;
1959 QDF_STATUS status;
1960
1961 /* first, try target assert inject via pld */
1962 errno = cds_force_assert_target_via_pld(qdf);
1963 if (!errno)
1964 return QDF_STATUS_SUCCESS;
1965 if (errno != -EOPNOTSUPP)
1966 return QDF_STATUS_E_FAILURE;
1967
1968 /* pld assert is not supported, try target assert inject via wmi */
1969 status = cds_force_assert_target_via_wmi(qdf);
1970 if (QDF_IS_STATUS_SUCCESS(status))
1971 return QDF_STATUS_SUCCESS;
1972
1973 /* wmi assert failed, start recovery without the firmware assert */
1974 cds_err("Scheduling recovery work without firmware assert");
1975 pld_schedule_recovery_work(qdf->dev, PLD_REASON_DEFAULT);
1976
1977 return status;
1978 }
1979
1980 /**
1981 * cds_trigger_recovery_handler() - handle a self recovery request
1982 * @func: the name of the function that called cds_trigger_recovery
1983 * @line: the line number of the call site which called cds_trigger_recovery
1984 *
1985 * Return: none
1986 */
1987 static void cds_trigger_recovery_handler(const char *func, const uint32_t line)
1988 {
1989 QDF_STATUS status;
1990 qdf_runtime_lock_t rtl;
1991 qdf_device_t qdf;
1992 bool ssr_ini_enabled = cds_is_self_recovery_enabled();
1993
1994 /* NOTE! This code path is delicate! Think very carefully before
1995 * modifying the content or order of the following. Please review any
1996 * potential changes with someone closely familiar with this feature.
1997 */
1998
1999 if (cds_is_driver_recovering()) {
2000 cds_info("WLAN recovery already in progress");
2001 return;
2002 }
2003
2004 if (cds_is_driver_in_bad_state()) {
2005 cds_info("WLAN has already failed recovery");
2006 return;
2007 }
2008
2009 if (cds_is_fw_down()) {
2010 cds_info("Firmware has already initiated recovery");
2011 return;
2012 }
2013
2014 qdf = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
2015 if (!qdf)
2016 return;
2017
2018 /*
2019 * If *wlan* recovery is disabled, crash here for debugging on
2020 * SNOC/IPCI targets.
2021 */
2022 if ((qdf->bus_type == QDF_BUS_TYPE_SNOC ||
2023 qdf->bus_type == QDF_BUS_TYPE_IPCI) && !ssr_ini_enabled) {
2024 QDF_DEBUG_PANIC("WLAN recovery is not enabled (via %s:%d)",
2025 func, line);
2026 return;
2027 }
2028
2029 if (cds_is_driver_unloading()) {
2030 QDF_DEBUG_PANIC("WLAN is unloading; recovery not expected (via %s:%d)",
2031 func, line);
2032 return;
2033 }
2034
2035 status = qdf_runtime_lock_init(&rtl);
2036 if (QDF_IS_STATUS_ERROR(status)) {
2037 cds_err("qdf_runtime_lock_init failed, status: %d", status);
2038 return;
2039 }
2040
2041 status = qdf_runtime_pm_prevent_suspend(&rtl);
2042 if (QDF_IS_STATUS_ERROR(status)) {
2043 cds_err("Failed to acquire runtime pm lock");
2044 goto deinit_rtl;
2045 }
2046
2047 cds_err("critical host timeout; triggering fw recovery for reason code %d",
2048 gp_cds_context->recovery_reason);
2049
2050 cds_set_recovery_in_progress(true);
2051 cds_set_assert_target_in_progress(true);
2052 if (pld_force_collect_target_dump(qdf->dev))
2053 cds_force_assert_target(qdf);
2054 cds_set_assert_target_in_progress(false);
2055
2056 /* Do not wait for the firmware-down indication; block WMI transactions now */
2057 wma_wmi_stop();
2058
2059 /*
2060 * If *wlan* recovery is disabled, crash the system once all the required
2061 * registers have been read via the platform driver.
2062 */
2063 if (qdf->bus_type == QDF_BUS_TYPE_PCI && !ssr_ini_enabled)
2064 QDF_DEBUG_PANIC("WLAN recovery is not enabled (via %s:%d)",
2065 func, line);
2066
2067 status = qdf_runtime_pm_allow_suspend(&rtl);
2068 if (QDF_IS_STATUS_ERROR(status))
2069 cds_err("Failed to release runtime pm lock");
2070
2071 deinit_rtl:
2072 qdf_runtime_lock_deinit(&rtl);
2073 }
2074
2075 static void cds_trigger_recovery_work(void *context)
2076 {
2077 struct cds_recovery_call_info *call_info = context;
2078
2079 cds_trigger_recovery_handler(call_info->func, call_info->line);
2080 }
2081
2082 void __cds_trigger_recovery(enum qdf_hang_reason reason, const char *func,
2083 const uint32_t line)
2084 {
2085 if (!gp_cds_context) {
2086 cds_err("gp_cds_context is null");
2087 return;
2088 }
2089
2090 gp_cds_context->recovery_reason = reason;
2091
2092 __cds_recovery_caller.func = func;
2093 __cds_recovery_caller.line = line;
2094 qdf_queue_work(0, gp_cds_context->cds_recovery_wq,
2095 &gp_cds_context->cds_recovery_work);
2096 }
2097
2098 void cds_trigger_recovery_psoc(void *psoc, enum qdf_hang_reason reason,
2099 const char *func, const uint32_t line)
2100 {
2101 __cds_trigger_recovery(reason, func, line);
2102 }
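
/*
 * Usage sketch (compiled out, illustrative only): requesting self-recovery
 * from driver code. Callers normally use a cds_trigger_recovery() convenience
 * macro that supplies __func__/__LINE__; the direct call below sticks to the
 * symbols visible in this file, and the hang reason is a placeholder.
 */
#if 0
static void cds_example_request_recovery(void)
{
	/* Queue the recovery work with an explicit hang reason */
	__cds_trigger_recovery(QDF_REASON_UNSPECIFIED, __func__, __LINE__);
}
#endif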
2103
2104
2105 /**
2106 * cds_get_recovery_reason() - get self recovery reason
2107 * @reason: pointer to be filled with the current recovery reason
2108 *
2109 * Return: None
2110 */
2111 void cds_get_recovery_reason(enum qdf_hang_reason *reason)
2112 {
2113 if (!gp_cds_context) {
2114 cds_err("gp_cds_context is null");
2115 return;
2116 }
2117
2118 *reason = gp_cds_context->recovery_reason;
2119 }
2120
2121 /**
2122 * cds_reset_recovery_reason() - reset the reason to unspecified
2123 *
2124 * Return: None
2125 */
2126 void cds_reset_recovery_reason(void)
2127 {
2128 if (!gp_cds_context) {
2129 cds_err("gp_cds_context is null");
2130 return;
2131 }
2132
2133 gp_cds_context->recovery_reason = QDF_REASON_UNSPECIFIED;
2134 }
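
/*
 * Usage sketch (compiled out, illustrative only): reading and then clearing
 * the stored recovery reason, e.g. while assembling a hang-event record.
 */
#if 0
static void cds_example_consume_recovery_reason(void)
{
	enum qdf_hang_reason reason = QDF_REASON_UNSPECIFIED;

	cds_get_recovery_reason(&reason);
	cds_debug("current recovery reason %d", reason);
	cds_reset_recovery_reason();
}
#endif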
2135
2136 /**
2137 * cds_set_wakelock_logging() - Enable/disable logging of wakelock events
2138 * @value: Boolean value
2139 *
2140 * This function is used to set the flag which will indicate whether
2141 * logging of wakelock is enabled or not
2142 *
2143 * Return: None
2144 */
2145 void cds_set_wakelock_logging(bool value)
2146 {
2147 struct cds_context *p_cds_context;
2148
2149 p_cds_context = cds_get_global_context();
2150 if (!p_cds_context) {
2151 cds_err("cds context is Invalid");
2152 return;
2153 }
2154 p_cds_context->is_wakelock_log_enabled = value;
2155 }
2156
2157 /**
2158 * cds_is_wakelock_enabled() - Check if logging of wakelock is enabled/disabled
2159 *
2160 * This function is used to check whether logging of wakelock is enabled or not
2161 *
2162 * Return: true if logging of wakelock is enabled
2163 */
2164 bool cds_is_wakelock_enabled(void)
2165 {
2166 struct cds_context *p_cds_context;
2167
2168 p_cds_context = cds_get_global_context();
2169 if (!p_cds_context) {
2170 cds_err("cds context is Invalid");
2171 return false;
2172 }
2173 return p_cds_context->is_wakelock_log_enabled;
2174 }
2175
2176 /**
2177 * cds_set_ring_log_level() - Sets the log level of a particular ring
2178 * @ring_id: ID of the ring to configure
2179 * @log_level: Log level specified
2180 *
2181 * This function converts HLOS values to driver log levels and sets the log
2182 * level of a particular ring accordingly.
2183 *
2184 * Return: None
2185 */
2186 void cds_set_ring_log_level(uint32_t ring_id, uint32_t log_level)
2187 {
2188 struct cds_context *p_cds_context;
2189 uint32_t log_val;
2190
2191 p_cds_context = cds_get_global_context();
2192 if (!p_cds_context) {
2193 cds_err("cds context is Invalid");
2194 return;
2195 }
2196
2197 switch (log_level) {
2198 case LOG_LEVEL_NO_COLLECTION:
2199 log_val = WLAN_LOG_LEVEL_OFF;
2200 break;
2201 case LOG_LEVEL_NORMAL_COLLECT:
2202 log_val = WLAN_LOG_LEVEL_NORMAL;
2203 break;
2204 case LOG_LEVEL_ISSUE_REPRO:
2205 log_val = WLAN_LOG_LEVEL_REPRO;
2206 break;
2207 case LOG_LEVEL_ACTIVE:
2208 default:
2209 log_val = WLAN_LOG_LEVEL_ACTIVE;
2210 break;
2211 }
2212
2213 if (ring_id == RING_ID_WAKELOCK) {
2214 p_cds_context->wakelock_log_level = log_val;
2215 return;
2216 } else if (ring_id == RING_ID_CONNECTIVITY) {
2217 p_cds_context->connectivity_log_level = log_val;
2218 return;
2219 } else if (ring_id == RING_ID_PER_PACKET_STATS) {
2220 p_cds_context->packet_stats_log_level = log_val;
2221 return;
2222 } else if (ring_id == RING_ID_DRIVER_DEBUG) {
2223 p_cds_context->driver_debug_log_level = log_val;
2224 return;
2225 } else if (ring_id == RING_ID_FIRMWARE_DEBUG) {
2226 p_cds_context->fw_debug_log_level = log_val;
2227 return;
2228 }
2229 }
2230
2231 /**
2232 * cds_get_ring_log_level() - Get the log level of a given ring
2233 * @ring_id: Ring id
2234 *
2235 * Fetch and return the log level corresponding to a ring id
2236 *
2237 * Return: Log level corresponding to the ring ID
2238 */
2239 enum wifi_driver_log_level cds_get_ring_log_level(uint32_t ring_id)
2240 {
2241 struct cds_context *p_cds_context;
2242
2243 p_cds_context = cds_get_global_context();
2244 if (!p_cds_context) {
2245 cds_err("cds context is Invalid");
2246 return WLAN_LOG_LEVEL_OFF;
2247 }
2248
2249 if (ring_id == RING_ID_WAKELOCK)
2250 return p_cds_context->wakelock_log_level;
2251 else if (ring_id == RING_ID_CONNECTIVITY)
2252 return p_cds_context->connectivity_log_level;
2253 else if (ring_id == RING_ID_PER_PACKET_STATS)
2254 return p_cds_context->packet_stats_log_level;
2255 else if (ring_id == RING_ID_DRIVER_DEBUG)
2256 return p_cds_context->driver_debug_log_level;
2257 else if (ring_id == RING_ID_FIRMWARE_DEBUG)
2258 return p_cds_context->fw_debug_log_level;
2259
2260 return WLAN_LOG_LEVEL_OFF;
2261 }
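
/*
 * Usage sketch (compiled out, illustrative only): configuring a ring from an
 * HLOS log level and reading back the driver-level value. The ring and level
 * chosen here are arbitrary.
 */
#if 0
static bool cds_example_per_packet_stats_active(void)
{
	cds_set_ring_log_level(RING_ID_PER_PACKET_STATS, LOG_LEVEL_ACTIVE);

	return cds_get_ring_log_level(RING_ID_PER_PACKET_STATS) ==
	       WLAN_LOG_LEVEL_ACTIVE;
}
#endif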
2262
2263 /**
2264 * cds_set_multicast_logging() - Set multicast logging value
2265 * @value: Value of multicast logging
2266 *
2267 * Set the multicast logging value which will indicate
2268 * whether to multicast host and fw messages even
2269 * without any registration by userspace entity
2270 *
2271 * Return: None
2272 */
2273 void cds_set_multicast_logging(uint8_t value)
2274 {
2275 cds_multicast_logging = value;
2276 }
2277
2278 /**
2279 * cds_is_multicast_logging() - Get multicast logging value
2280 *
2281 * Get the multicast logging value which will indicate
2282 * whether to multicast host and fw messages even
2283 * without any registration by userspace entity
2284 *
2285 * Return: 0 - Multicast logging disabled, 1 - Multicast logging enabled
2286 */
2287 uint8_t cds_is_multicast_logging(void)
2288 {
2289 return cds_multicast_logging;
2290 }
2291
2292 /*
2293 * cds_init_log_completion() - Initialize log param structure
2294 *
2295 * This function is used to initialize the logging related
2296 * parameters
2297 *
2298 * Return: None
2299 */
2300 void cds_init_log_completion(void)
2301 {
2302 struct cds_context *p_cds_context;
2303
2304 p_cds_context = cds_get_global_context();
2305 if (!p_cds_context) {
2306 cds_err("cds context is Invalid");
2307 return;
2308 }
2309
2310 p_cds_context->log_complete.is_fatal = WLAN_LOG_TYPE_NON_FATAL;
2311 p_cds_context->log_complete.indicator = WLAN_LOG_INDICATOR_UNUSED;
2312 p_cds_context->log_complete.reason_code = WLAN_LOG_REASON_CODE_UNUSED;
2313 p_cds_context->log_complete.is_report_in_progress = false;
2314 }
2315
2316 /**
2317 * cds_set_log_completion() - Store the logging params
2318 * @is_fatal: Indicates if the event triggering bug report is fatal or not
2319 * @indicator: Source which triggered the bug report
2320 * @reason_code: Reason for triggering bug report
2321 * @recovery_needed: If recovery is needed after bug report
2322 *
2323 * This function is used to set the logging parameters based on the
2324 * caller
2325 *
2326 * Return: QDF_STATUS_SUCCESS if the params were stored successfully
2327 */
2328 QDF_STATUS cds_set_log_completion(uint32_t is_fatal,
2329 uint32_t indicator,
2330 uint32_t reason_code,
2331 bool recovery_needed)
2332 {
2333 struct cds_context *p_cds_context;
2334
2335 p_cds_context = cds_get_global_context();
2336 if (!p_cds_context) {
2337 cds_err("cds context is Invalid");
2338 return QDF_STATUS_E_FAILURE;
2339 }
2340
2341 qdf_spinlock_acquire(&p_cds_context->bug_report_lock);
2342 p_cds_context->log_complete.is_fatal = is_fatal;
2343 p_cds_context->log_complete.indicator = indicator;
2344 p_cds_context->log_complete.reason_code = reason_code;
2345 p_cds_context->log_complete.recovery_needed = recovery_needed;
2346 p_cds_context->log_complete.is_report_in_progress = true;
2347 qdf_spinlock_release(&p_cds_context->bug_report_lock);
2348 cds_debug("is_fatal %d indicator %d reason_code %d recovery needed %d",
2349 is_fatal, indicator, reason_code, recovery_needed);
2350
2351 return QDF_STATUS_SUCCESS;
2352 }
2353
2354 /**
2355 * cds_get_and_reset_log_completion() - Get and reset logging related params
2356 * @is_fatal: Indicates if the event triggering bug report is fatal or not
2357 * @indicator: Source which triggered the bug report
2358 * @reason_code: Reason for triggering bug report
2359 * @recovery_needed: If recovery is needed after bug report
2360 *
2361 * This function is used to get the logging related parameters
2362 *
2363 * Return: None
2364 */
2365 void cds_get_and_reset_log_completion(uint32_t *is_fatal,
2366 uint32_t *indicator,
2367 uint32_t *reason_code,
2368 bool *recovery_needed)
2369 {
2370 struct cds_context *p_cds_context;
2371
2372 p_cds_context = cds_get_global_context();
2373 if (!p_cds_context) {
2374 cds_err("cds context is Invalid");
2375 return;
2376 }
2377
2378 qdf_spinlock_acquire(&p_cds_context->bug_report_lock);
2379 *is_fatal = p_cds_context->log_complete.is_fatal;
2380 *indicator = p_cds_context->log_complete.indicator;
2381 *reason_code = p_cds_context->log_complete.reason_code;
2382 *recovery_needed = p_cds_context->log_complete.recovery_needed;
2383
2384 /* reset */
2385 p_cds_context->log_complete.indicator = WLAN_LOG_INDICATOR_UNUSED;
2386 p_cds_context->log_complete.is_fatal = WLAN_LOG_TYPE_NON_FATAL;
2387 p_cds_context->log_complete.is_report_in_progress = false;
2388 p_cds_context->log_complete.reason_code = WLAN_LOG_REASON_CODE_UNUSED;
2389 p_cds_context->log_complete.recovery_needed = false;
2390 qdf_spinlock_release(&p_cds_context->bug_report_lock);
2391 }
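
/*
 * Usage sketch (compiled out, illustrative only): the producer/consumer
 * pairing of the log-completion parameters. The "unused" constants from this
 * file are reused purely as placeholder values.
 */
#if 0
static void cds_example_log_completion_roundtrip(void)
{
	uint32_t is_fatal, indicator, reason_code;
	bool recovery_needed;

	/* Producer: record why a bug report was triggered */
	cds_set_log_completion(WLAN_LOG_TYPE_NON_FATAL,
			       WLAN_LOG_INDICATOR_UNUSED,
			       WLAN_LOG_REASON_CODE_UNUSED,
			       false);

	/* Consumer: fetch the parameters and clear the in-progress state */
	cds_get_and_reset_log_completion(&is_fatal, &indicator,
					 &reason_code, &recovery_needed);
}
#endif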
2392
2393 /**
2394 * cds_is_log_report_in_progress() - Check if bug reporting is in progress
2395 *
2396 * This function is used to check if the bug reporting is already in progress
2397 *
2398 * Return: true if the bug reporting is in progress
2399 */
2400 bool cds_is_log_report_in_progress(void)
2401 {
2402 struct cds_context *p_cds_context;
2403
2404 p_cds_context = cds_get_global_context();
2405 if (!p_cds_context) {
2406 cds_err("cds context is Invalid");
2407 return true;
2408 }
2409 return p_cds_context->log_complete.is_report_in_progress;
2410 }
2411
2412 /**
2413 * cds_is_fatal_event_enabled() - Return if fatal event is enabled
2414 *
2415 * Return: true if fatal event reporting is enabled.
2416 */
2417 bool cds_is_fatal_event_enabled(void)
2418 {
2419 struct cds_context *p_cds_context;
2420
2421 p_cds_context = cds_get_global_context();
2422 if (!p_cds_context) {
2423 cds_err("cds context is Invalid");
2424 return false;
2425 }
2426
2427
2428 return p_cds_context->enable_fatal_event;
2429 }
2430
2431 #ifdef WLAN_FEATURE_TSF_PLUS_SOCK_TS
2432 bool cds_is_ptp_rx_opt_enabled(void)
2433 {
2434 struct hdd_context *hdd_ctx;
2435 struct cds_context *p_cds_context;
2436
2437 p_cds_context = cds_get_global_context();
2438 if (!p_cds_context) {
2439 cds_err("cds context is Invalid");
2440 return false;
2441 }
2442
2443 hdd_ctx = (struct hdd_context *)(p_cds_context->hdd_context);
2444 if ((!hdd_ctx) || (!hdd_ctx->config)) {
2445 cds_err("Hdd Context is Null");
2446 return false;
2447 }
2448
2449 return hdd_tsf_is_rx_set(hdd_ctx);
2450 }
2451
2452 bool cds_is_ptp_tx_opt_enabled(void)
2453 {
2454 struct hdd_context *hdd_ctx;
2455 struct cds_context *p_cds_context;
2456
2457 p_cds_context = cds_get_global_context();
2458 if (!p_cds_context) {
2459 cds_err("cds context is Invalid");
2460 return false;
2461 }
2462
2463 hdd_ctx = (struct hdd_context *)(p_cds_context->hdd_context);
2464 if ((!hdd_ctx) || (!hdd_ctx->config)) {
2465 cds_err("Hdd Context is Null");
2466 return false;
2467 }
2468
2469 return hdd_tsf_is_tx_set(hdd_ctx);
2470 }
2471 #endif
2472
2473 /**
2474 * cds_get_log_indicator() - Get the log flush indicator
2475 *
2476 * This function is used to get the log flush indicator
2477 *
2478 * Return: log indicator
2479 */
2480 uint32_t cds_get_log_indicator(void)
2481 {
2482 struct cds_context *p_cds_context;
2483 uint32_t indicator;
2484
2485 p_cds_context = cds_get_global_context();
2486 if (!p_cds_context) {
2487 cds_err("cds context is Invalid");
2488 return WLAN_LOG_INDICATOR_UNUSED;
2489 }
2490
2491 if (cds_is_load_or_unload_in_progress() ||
2492 cds_is_driver_recovering() || cds_is_driver_in_bad_state()) {
2493 return WLAN_LOG_INDICATOR_UNUSED;
2494 }
2495
2496 qdf_spinlock_acquire(&p_cds_context->bug_report_lock);
2497 indicator = p_cds_context->log_complete.indicator;
2498 qdf_spinlock_release(&p_cds_context->bug_report_lock);
2499 return indicator;
2500 }
2501
2502 /**
2503 * cds_wlan_flush_host_logs_for_fatal() - Wrapper to flush host logs
2504 *
2505 * This function is used to send signal to the logger thread to
2506 * flush the host logs.
2507 *
2508 * Return: None
2509 *
2510 */
2511 void cds_wlan_flush_host_logs_for_fatal(void)
2512 {
2513 if (cds_is_log_report_in_progress())
2514 wlan_flush_host_logs_for_fatal();
2515 }
2516
2517 /**
2518 * cds_flush_logs() - Report fatal event to userspace
2519 * @is_fatal: Indicates if the event triggering bug report is fatal or not
2520 * @indicator: Source which triggered the bug report
2521 * @reason_code: Reason for triggering bug report
2522 * @dump_mac_trace: Whether MAC traces should be included in the logs
2523 * @recovery_needed: If recovery is needed after bug report
2524 *
2525 * This function sets the log-related params and sends the WMI command to the
2526 * FW to flush its logs. On receiving the flush completion event from the FW,
2527 * the completion is conveyed to userspace.
2528 *
2529 * Return: QDF_STATUS_SUCCESS on success
2530 */
2531 QDF_STATUS cds_flush_logs(uint32_t is_fatal,
2532 uint32_t indicator,
2533 uint32_t reason_code,
2534 bool dump_mac_trace,
2535 bool recovery_needed)
2536 {
2537 QDF_STATUS status;
2538
2539 struct cds_context *p_cds_context;
2540
2541 p_cds_context = cds_get_global_context();
2542 if (!p_cds_context) {
2543 cds_err("cds context is Invalid");
2544 return QDF_STATUS_E_FAILURE;
2545 }
2546 if (!p_cds_context->enable_fatal_event) {
2547 cds_err("Fatal event not enabled");
2548 return QDF_STATUS_E_FAILURE;
2549 }
2550 if (cds_is_load_or_unload_in_progress() ||
2551 cds_is_driver_recovering() || cds_is_driver_in_bad_state()) {
2552 cds_err("Load/unload/SSR in progress");
2553 return QDF_STATUS_E_FAILURE;
2554 }
2555
2556 if (cds_is_log_report_in_progress()) {
2557 cds_err("Bug report already in progress - dropping! type:%d, indicator=%d reason_code=%d",
2558 is_fatal, indicator, reason_code);
2559 return QDF_STATUS_E_FAILURE;
2560 }
2561
2562 status = cds_set_log_completion(is_fatal, indicator,
2563 reason_code, recovery_needed);
2564 if (QDF_STATUS_SUCCESS != status) {
2565 cds_err("Failed to set log trigger params");
2566 return QDF_STATUS_E_FAILURE;
2567 }
2568
2569 cds_debug("Triggering bug report: type:%d, indicator=%d reason_code=%d",
2570 is_fatal, indicator, reason_code);
2571
2572 if (dump_mac_trace)
2573 qdf_trace_dump_all(p_cds_context->mac_context, 0, 0, 100, 0);
2574
2575 if (WLAN_LOG_INDICATOR_HOST_ONLY == indicator) {
2576 cds_wlan_flush_host_logs_for_fatal();
2577 return QDF_STATUS_SUCCESS;
2578 }
2579
2580 status = sme_send_flush_logs_cmd_to_fw();
2581 if (QDF_IS_STATUS_ERROR(status)) {
2582 cds_err("Failed to send flush FW log");
2583 cds_init_log_completion();
2584 return QDF_STATUS_E_FAILURE;
2585 }
2586
2587 return QDF_STATUS_SUCCESS;
2588 }
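
/*
 * Usage sketch (compiled out, illustrative only): triggering a host-only log
 * flush. With WLAN_LOG_INDICATOR_HOST_ONLY the routine above returns before
 * any WMI command is sent; the remaining arguments are placeholders.
 */
#if 0
static void cds_example_flush_host_logs(void)
{
	QDF_STATUS status;

	status = cds_flush_logs(WLAN_LOG_TYPE_NON_FATAL,
				WLAN_LOG_INDICATOR_HOST_ONLY,
				WLAN_LOG_REASON_CODE_UNUSED,
				false, false);
	if (QDF_IS_STATUS_ERROR(status))
		cds_debug("log flush not triggered; status %d", status);
}
#endif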
2589
2590 /**
2591 * cds_logging_set_fw_flush_complete() - Wrapper for FW log flush completion
2592 *
2593 * This function is used to send signal to the logger thread to indicate
2594 * that the flushing of FW logs is complete by the FW
2595 *
2596 * Return: None
2597 *
2598 */
2599 void cds_logging_set_fw_flush_complete(void)
2600 {
2601 if (cds_is_fatal_event_enabled())
2602 wlan_logging_set_fw_flush_complete();
2603 }
2604
2605 /**
2606 * cds_set_fatal_event() - set fatal event status
2607 * @value: fatal event status to set
2608 *
2609 * Return: None
2610 */
2611 void cds_set_fatal_event(bool value)
2612 {
2613 struct cds_context *p_cds_context;
2614
2615 p_cds_context = cds_get_global_context();
2616 if (!p_cds_context) {
2617 cds_err("cds context is Invalid");
2618 return;
2619 }
2620 p_cds_context->enable_fatal_event = value;
2621 }
2622
2623 /**
2624 * cds_get_radio_index() - get radio index
2625 *
2626 * Return: radio index on success, otherwise -EINVAL
2627 */
2628 int cds_get_radio_index(void)
2629 {
2630 struct cds_context *p_cds_context;
2631
2632 p_cds_context = cds_get_global_context();
2633 if (!p_cds_context) {
2634 /*
2635 * To avoid recursive call, this should not change to
2636 * QDF_TRACE().
2637 */
2638 pr_err("%s: cds context is invalid\n", __func__);
2639 return -EINVAL;
2640 }
2641
2642 return p_cds_context->radio_index;
2643 }
2644
2645 /**
2646 * cds_set_radio_index() - set radio index
2647 * @radio_index: the radio index to set
2648 *
2649 * Return: QDF status
2650 */
2651 QDF_STATUS cds_set_radio_index(int radio_index)
2652 {
2653 struct cds_context *p_cds_context;
2654
2655 p_cds_context = cds_get_global_context();
2656 if (!p_cds_context) {
2657 pr_err("%s: cds context is invalid\n", __func__);
2658 return QDF_STATUS_E_FAILURE;
2659 }
2660
2661 p_cds_context->radio_index = radio_index;
2662
2663 return QDF_STATUS_SUCCESS;
2664 }
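
/*
 * Usage sketch (compiled out, illustrative only): storing the radio index
 * reported by the platform and reading it back. The index value is a
 * placeholder.
 */
#if 0
static void cds_example_radio_index(void)
{
	if (QDF_IS_STATUS_SUCCESS(cds_set_radio_index(0)))
		cds_debug("radio index %d", cds_get_radio_index());
}
#endif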
2665
2666 /**
2667 * cds_init_ini_config() - API to initialize CDS configuration parameters
2668 * @cfg: CDS Configuration
2669 *
2670 * Return: void
2671 */
2672
2673 void cds_init_ini_config(struct cds_config_info *cfg)
2674 {
2675 struct cds_context *cds_ctx;
2676
2677 cds_ctx = cds_get_context(QDF_MODULE_ID_QDF);
2678 if (!cds_ctx)
2679 return;
2680
2681 cds_ctx->cds_cfg = cfg;
2682 }
2683
2684 /**
2685 * cds_deinit_ini_config() - API to free CDS configuration parameters
2686 *
2687 * Return: void
2688 */
2689 void cds_deinit_ini_config(void)
2690 {
2691 struct cds_context *cds_ctx;
2692 struct cds_config_info *cds_cfg;
2693
2694 cds_ctx = cds_get_context(QDF_MODULE_ID_QDF);
2695 if (!cds_ctx)
2696 return;
2697
2698 cds_cfg = cds_ctx->cds_cfg;
2699 cds_ctx->cds_cfg = NULL;
2700
2701 if (cds_cfg)
2702 qdf_mem_free(cds_cfg);
2703 }
2704
2705 /**
2706 * cds_get_ini_config() - API to get CDS configuration parameters
2707 *
2708 * Return: cds config structure
2709 */
2710 struct cds_config_info *cds_get_ini_config(void)
2711 {
2712 struct cds_context *cds_ctx;
2713
2714 cds_ctx = cds_get_context(QDF_MODULE_ID_QDF);
2715 if (!cds_ctx)
2716 return NULL;
2717
2718 return cds_ctx->cds_cfg;
2719 }
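
/*
 * Usage sketch (compiled out, illustrative only): the cds_cfg lifecycle
 * implied by the three helpers above. The allocation site shown here is
 * hypothetical; in the driver the structure is populated from INI-derived
 * values before being handed over to CDS.
 */
#if 0
static void cds_example_cfg_lifecycle(void)
{
	struct cds_config_info *cfg = qdf_mem_malloc(sizeof(*cfg));

	if (!cfg)
		return;

	cds_init_ini_config(cfg);	/* ownership passes to CDS */

	/* ... consumers call cds_get_ini_config() while the driver runs ... */

	cds_deinit_ini_config();	/* CDS frees the structure */
}
#endif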
2720
2721 /**
2722 * cds_is_5_mhz_enabled() - API to check whether 5 MHz operation is enabled
2723 *
2724 * Return: true if 5 mhz is enabled, false otherwise
2725 */
2726 bool cds_is_5_mhz_enabled(void)
2727 {
2728 struct cds_context *p_cds_context;
2729
2730 p_cds_context = cds_get_context(QDF_MODULE_ID_QDF);
2731 if (!p_cds_context)
2732 return false;
2733
2734 if (p_cds_context->cds_cfg)
2735 return (p_cds_context->cds_cfg->sub_20_channel_width ==
2736 WLAN_SUB_20_CH_WIDTH_5);
2737
2738 return false;
2739 }
2740
2741 /**
2742 * cds_is_10_mhz_enabled() - API to check whether 10 MHz operation is enabled
2743 *
2744 * Return: true if 10 mhz is enabled, false otherwise
2745 */
2746 bool cds_is_10_mhz_enabled(void)
2747 {
2748 struct cds_context *p_cds_context;
2749
2750 p_cds_context = cds_get_context(QDF_MODULE_ID_QDF);
2751 if (!p_cds_context)
2752 return false;
2753
2754 if (p_cds_context->cds_cfg)
2755 return (p_cds_context->cds_cfg->sub_20_channel_width ==
2756 WLAN_SUB_20_CH_WIDTH_10);
2757
2758 return false;
2759 }
2760
2761 /**
2762 * cds_is_sub_20_mhz_enabled() - API to check whether sub-20 MHz operation is enabled
2763 *
2764 * Return: true if 5 or 10 mhz is enabled, false otherwise
2765 */
2766 bool cds_is_sub_20_mhz_enabled(void)
2767 {
2768 struct cds_context *p_cds_context;
2769
2770 p_cds_context = cds_get_context(QDF_MODULE_ID_QDF);
2771 if (!p_cds_context)
2772 return false;
2773
2774 if (p_cds_context->cds_cfg)
2775 return p_cds_context->cds_cfg->sub_20_channel_width;
2776
2777 return false;
2778 }
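
/*
 * Usage sketch (compiled out, illustrative only): the usual pattern for
 * consulting the sub-20 MHz helpers above, e.g. when picking an operating
 * channel width.
 */
#if 0
static uint32_t cds_example_sub20_width_mhz(void)
{
	if (cds_is_5_mhz_enabled())
		return 5;
	if (cds_is_10_mhz_enabled())
		return 10;

	return 20;
}
#endif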
2779
2780 /**
2781 * cds_is_self_recovery_enabled() - API to get self recovery enabled
2782 *
2783 * Return: true if self recovery enabled, false otherwise
2784 */
2785 bool cds_is_self_recovery_enabled(void)
2786 {
2787 struct cds_context *p_cds_context;
2788
2789 p_cds_context = cds_get_context(QDF_MODULE_ID_QDF);
2790 if (!p_cds_context)
2791 return false;
2792
2793 if (p_cds_context->cds_cfg)
2794 return p_cds_context->cds_cfg->self_recovery_enabled;
2795
2796 return false;
2797 }
2798
2799 /**
2800 * cds_is_fw_down() - Is FW down or not
2801 *
2802 * Return: true if FW is down and false otherwise.
2803 */
2804 bool cds_is_fw_down(void)
2805 {
2806 qdf_device_t qdf_ctx;
2807
2808 qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
2809 if (!qdf_ctx)
2810 return false;
2811
2812 return pld_is_fw_down(qdf_ctx->dev);
2813 }
2814
2815 /**
2816 * cds_svc_fw_shutdown_ind() - API to notify userspace about an FW crash
2817 *
2818 * @dev: Device Pointer
2819 *
2820 * Return: None
2821 */
2822 void cds_svc_fw_shutdown_ind(struct device *dev)
2823 {
2824 hdd_svc_fw_shutdown_ind(dev);
2825 }
2826
2827 #ifdef WLAN_LOGGING_SOCK_SVC_ENABLE
2828 /*
2829 * cds_pkt_stats_to_logger_thread() - send pktstats to user
2830 * @pl_hdr: Pointer to pl_hdr
2831 * @pkt_dump: Pointer to pkt_dump data structure.
2832 * @data: Pointer to data
2833 *
2834 * This function is used to send the pkt stats to SVC module.
2835 *
2836 * Return: None
2837 */
2838 inline void cds_pkt_stats_to_logger_thread(void *pl_hdr, void *pkt_dump,
2839 void *data)
2840 {
2841 if (cds_get_ring_log_level(RING_ID_PER_PACKET_STATS) !=
2842 WLAN_LOG_LEVEL_ACTIVE)
2843 return;
2844
2845 wlan_pkt_stats_to_logger_thread(pl_hdr, pkt_dump, data);
2846 }
2847 #endif
2848
2849 /**
2850 * cds_get_conparam() - Get the connection mode parameters
2851 *
2852 * Return the connection mode parameter set via insmod, or the mode configured
2853 * when the driver is statically linked
2854 *
2855 * Return: enum QDF_GLOBAL_MODE
2856 */
cds_get_conparam(void)2857 enum QDF_GLOBAL_MODE cds_get_conparam(void)
2858 {
2859 enum QDF_GLOBAL_MODE con_mode;
2860
2861 con_mode = hdd_get_conparam();
2862
2863 return con_mode;
2864 }
2865
2866 #ifdef FEATURE_HTC_CREDIT_HISTORY
2867 inline void
2868 cds_print_htc_credit_history(uint32_t count, qdf_abstract_print *print,
2869 void *print_priv)
2870 {
2871 htc_print_credit_history(gp_cds_context->htc_ctx, count,
2872 print, print_priv);
2873 }
2874 #endif
2875
2876 #ifdef FEATURE_ALIGN_STATS_FROM_DP
2877 /**
2878 * cds_get_cdp_vdev_stats() - Retrieve CDP vdev stats from DP
2879 * @vdev_id: vdev id
2880 * @vdev_stats: CDP vdev stats retrieved from DP
2881 *
2882 * Return: true if the CDP vdev stats were retrieved successfully, false otherwise
2883 */
2884 static bool
2885 cds_get_cdp_vdev_stats(uint8_t vdev_id, struct cdp_vdev_stats *vdev_stats)
2886 {
2887 void *soc = cds_get_context(QDF_MODULE_ID_SOC);
2888
2889 if (!vdev_stats)
2890 return false;
2891
2892 if (cdp_host_get_vdev_stats(soc, vdev_id, vdev_stats, true))
2893 return false;
2894
2895 return true;
2896 }
2897
2898 bool
2899 cds_dp_get_vdev_stats(uint8_t vdev_id, struct cds_vdev_dp_stats *stats)
2900 {
2901 struct cdp_vdev_stats *vdev_stats;
2902 bool ret = false;
2903
2904 vdev_stats = qdf_mem_malloc(sizeof(*vdev_stats));
2905 if (!vdev_stats)
2906 return false;
2907
2908 if (cds_get_cdp_vdev_stats(vdev_id, vdev_stats)) {
2909 stats->tx_retries = vdev_stats->tx.retries;
2910 stats->tx_retries_mpdu = vdev_stats->tx.retries_mpdu;
2911 stats->tx_mpdu_success_with_retries =
2912 vdev_stats->tx.mpdu_success_with_retries;
2913 ret = true;
2914 }
2915
2916 qdf_mem_free(vdev_stats);
2917 return ret;
2918 }
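
/*
 * Usage sketch (compiled out, illustrative only): consuming the aligned DP
 * stats helper above. The vdev id and the logging of a single counter are
 * placeholders.
 */
#if 0
static void cds_example_log_vdev_retries(uint8_t vdev_id)
{
	struct cds_vdev_dp_stats stats = {0};

	if (cds_dp_get_vdev_stats(vdev_id, &stats))
		cds_debug("vdev %u tx retries %u", vdev_id, stats.tx_retries);
}
#endif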
2919 #endif
2920
2921 #ifdef ENABLE_SMMU_S1_TRANSLATION
2922 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
2923 QDF_STATUS cds_smmu_mem_map_setup(qdf_device_t osdev, bool ipa_present)
2924 {
2925 struct iommu_domain *domain;
2926 bool ipa_smmu_enabled;
2927 bool wlan_smmu_enabled;
2928
2929 domain = pld_smmu_get_domain(osdev->dev);
2930 if (domain) {
2931 int attr = 0;
2932 int errno = qdf_iommu_domain_get_attr(domain,
2933 QDF_DOMAIN_ATTR_S1_BYPASS,
2934 &attr);
2935
2936 wlan_smmu_enabled = !errno && !attr;
2937 } else {
2938 cds_info("No SMMU mapping present");
2939 wlan_smmu_enabled = false;
2940 }
2941
2942 if (!wlan_smmu_enabled) {
2943 osdev->smmu_s1_enabled = false;
2944 goto exit_with_success;
2945 }
2946
2947 if (!ipa_present) {
2948 osdev->smmu_s1_enabled = true;
2949 goto exit_with_success;
2950 }
2951
2952 ipa_smmu_enabled = qdf_get_ipa_smmu_enabled();
2953
2954 osdev->smmu_s1_enabled = ipa_smmu_enabled && wlan_smmu_enabled;
2955 if (ipa_smmu_enabled != wlan_smmu_enabled) {
2956 cds_err("SMMU mismatch; IPA:%s, WLAN:%s",
2957 ipa_smmu_enabled ? "enabled" : "disabled",
2958 wlan_smmu_enabled ? "enabled" : "disabled");
2959 return QDF_STATUS_E_FAILURE;
2960 }
2961
2962 exit_with_success:
2963 osdev->domain = domain;
2964
2965 cds_info("SMMU S1 %s", osdev->smmu_s1_enabled ? "enabled" : "disabled");
2966
2967 return QDF_STATUS_SUCCESS;
2968 }
2969
2970 #else
2971 QDF_STATUS cds_smmu_mem_map_setup(qdf_device_t osdev, bool ipa_present)
2972 {
2973 struct dma_iommu_mapping *mapping;
2974 bool ipa_smmu_enabled;
2975 bool wlan_smmu_enabled;
2976
2977 mapping = pld_smmu_get_mapping(osdev->dev);
2978 if (mapping) {
2979 int attr = 0;
2980 int errno = qdf_iommu_domain_get_attr(mapping->domain,
2981 QDF_DOMAIN_ATTR_S1_BYPASS,
2982 &attr);
2983
2984 wlan_smmu_enabled = !errno && !attr;
2985 } else {
2986 cds_info("No SMMU mapping present");
2987 wlan_smmu_enabled = false;
2988 }
2989
2990 if (!wlan_smmu_enabled) {
2991 osdev->smmu_s1_enabled = false;
2992 goto exit_with_success;
2993 }
2994
2995 if (!ipa_present) {
2996 osdev->smmu_s1_enabled = true;
2997 goto exit_with_success;
2998 }
2999
3000 ipa_smmu_enabled = qdf_get_ipa_smmu_enabled();
3001
3002 osdev->smmu_s1_enabled = ipa_smmu_enabled && wlan_smmu_enabled;
3003 if (ipa_smmu_enabled != wlan_smmu_enabled) {
3004 cds_err("SMMU mismatch; IPA:%s, WLAN:%s",
3005 ipa_smmu_enabled ? "enabled" : "disabled",
3006 wlan_smmu_enabled ? "enabled" : "disabled");
3007 return QDF_STATUS_E_FAILURE;
3008 }
3009
3010 exit_with_success:
3011 osdev->iommu_mapping = mapping;
3012
3013 cds_info("SMMU S1 %s", osdev->smmu_s1_enabled ? "enabled" : "disabled");
3014
3015 return QDF_STATUS_SUCCESS;
3016 }
3017 #endif
3018
3019 #ifdef IPA_OFFLOAD
3020 int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
3021 {
3022 return ucfg_ipa_uc_smmu_map(map, num_buf, buf_arr);
3023 }
3024 #else
3025 int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
3026 {
3027 return 0;
3028 }
3029 #endif
3030
3031 #else
3032 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
3033 QDF_STATUS cds_smmu_mem_map_setup(qdf_device_t osdev, bool ipa_present)
3034 {
3035 osdev->smmu_s1_enabled = false;
3036 osdev->domain = NULL;
3037 return QDF_STATUS_SUCCESS;
3038 }
3039 #else
3040 QDF_STATUS cds_smmu_mem_map_setup(qdf_device_t osdev, bool ipa_present)
3041 {
3042 osdev->smmu_s1_enabled = false;
3043 return QDF_STATUS_SUCCESS;
3044 }
3045 #endif
3046
3047 int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
3048 {
3049 return 0;
3050 }
3051 #endif
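
/*
 * Usage sketch (compiled out, illustrative only): boot-time SMMU setup
 * followed by an IPA buffer map request. Whether the map call reaches IPA
 * depends on the build-time options handled above; the argument values are
 * placeholders.
 */
#if 0
static QDF_STATUS cds_example_smmu_setup(qdf_device_t osdev, bool ipa_present,
					 qdf_mem_info_t *bufs, uint32_t nbufs)
{
	QDF_STATUS status;

	status = cds_smmu_mem_map_setup(osdev, ipa_present);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	if (cds_smmu_map_unmap(true, nbufs, bufs))
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}
#endif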
3052