1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6
7 #include <linux/delay.h>
8 #include <linux/devcoredump.h>
9 #include <linux/elf.h>
10 #include <linux/jiffies.h>
11 #include <linux/module.h>
12 #include <linux/of.h>
13 #include <linux/of_device.h>
14 #include <linux/of_gpio.h>
15 #include <linux/pm_wakeup.h>
16 #include <linux/reboot.h>
17 #include <linux/rwsem.h>
18 #include <linux/suspend.h>
19 #include <linux/timer.h>
20 #include <linux/thermal.h>
21 #include <linux/version.h>
22 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 14, 0))
23 #include <linux/panic_notifier.h>
24 #endif
25 #if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
26 #include <soc/qcom/minidump.h>
27 #endif
28
29 #include "cnss_plat_ipc_qmi.h"
30 #include "cnss_utils.h"
31 #include "main.h"
32 #include "bus.h"
33 #include "debug.h"
34 #include "genl.h"
35 #include "reg.h"
36
37 #ifdef CONFIG_CNSS_HW_SECURE_DISABLE
38 #ifdef CONFIG_CNSS_HW_SECURE_SMEM
39 #include <linux/soc/qcom/smem.h>
40 #define PERISEC_SMEM_ID 651
41 #define HW_WIFI_UID 0x508
42 #else
43 #include "smcinvoke.h"
44 #include "smcinvoke_object.h"
45 #include "IClientEnv.h"
46 #define HW_STATE_UID 0x108
47 #define HW_OP_GET_STATE 1
48 #define HW_WIFI_UID 0x508
49 #define FEATURE_NOT_SUPPORTED 12
50 #define PERIPHERAL_NOT_FOUND 10
51 #endif
52 #endif
53
54 #define CNSS_DUMP_FORMAT_VER 0x11
55 #define CNSS_DUMP_FORMAT_VER_V2 0x22
56 #define CNSS_DUMP_MAGIC_VER_V2 0x42445953
57 #define CNSS_DUMP_NAME "CNSS_WLAN"
58 #define CNSS_DUMP_DESC_SIZE 0x1000
59 #define CNSS_DUMP_SEG_VER 0x1
60 #define FILE_SYSTEM_READY 1
61 #define FW_READY_TIMEOUT 20000
62 #define FW_ASSERT_TIMEOUT 5000
63 #define CNSS_EVENT_PENDING 2989
64 #define POWER_RESET_MIN_DELAY_MS 100
65 #define MAX_NAME_LEN 12
66
67 #define CNSS_QUIRKS_DEFAULT 0
68 #ifdef CONFIG_CNSS_EMULATION
69 #define CNSS_MHI_TIMEOUT_DEFAULT 90000
70 #define CNSS_MHI_M2_TIMEOUT_DEFAULT 2000
71 #define CNSS_QMI_TIMEOUT_DEFAULT 90000
72 #else
73 #define CNSS_MHI_TIMEOUT_DEFAULT 0
74 #define CNSS_MHI_M2_TIMEOUT_DEFAULT 25
75 #define CNSS_QMI_TIMEOUT_DEFAULT 10000
76 #endif
77 #define CNSS_BDF_TYPE_DEFAULT CNSS_BDF_ELF
78 #define CNSS_TIME_SYNC_PERIOD_DEFAULT 900000
79 #define CNSS_MIN_TIME_SYNC_PERIOD 2000
80 #define CNSS_DMS_QMI_CONNECTION_WAIT_MS 50
81 #define CNSS_DMS_QMI_CONNECTION_WAIT_RETRY 200
82 #define CNSS_DAEMON_CONNECT_TIMEOUT_MS 30000
83 #define CNSS_CAL_DB_FILE_NAME "wlfw_cal_db.bin"
84 #define CNSS_CAL_START_PROBE_WAIT_RETRY_MAX 100
85 #define CNSS_CAL_START_PROBE_WAIT_MS 500
86 #define CNSS_TIME_SYNC_PERIOD_INVALID 0xFFFFFFFF
87
88 enum cnss_cal_db_op {
89 CNSS_CAL_DB_UPLOAD,
90 CNSS_CAL_DB_DOWNLOAD,
91 CNSS_CAL_DB_INVALID_OP,
92 };
93
94 enum cnss_recovery_type {
95 CNSS_WLAN_RECOVERY = 0x1,
96 CNSS_PCSS_RECOVERY = 0x2,
97 };
98
99 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
100 #define CNSS_MAX_DEV_NUM 2
101 static struct cnss_plat_data *plat_env[CNSS_MAX_DEV_NUM];
102 static atomic_t plat_env_count;
103 #else
104 static struct cnss_plat_data *plat_env;
105 #endif
106
107 static bool cnss_allow_driver_loading;
108
109 static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
110 "qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
111 "utfbd30.bin", "epping30.bin", "evicted30.bin"
112 };
113
114 static struct cnss_fw_files FW_FILES_DEFAULT = {
115 "qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
116 "utfbd.bin", "epping.bin", "evicted.bin"
117 };
118
119 struct cnss_driver_event {
120 struct list_head list;
121 enum cnss_driver_event_type type;
122 bool sync;
123 struct completion complete;
124 int ret;
125 void *data;
126 };
127
128 bool cnss_check_driver_loading_allowed(void)
129 {
130 return cnss_allow_driver_loading;
131 }
132
133 #ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
134 static void cnss_init_plat_env_count(void)
135 {
136 atomic_set(&plat_env_count, 0);
137 }
138
139 static void cnss_inc_plat_env_count(void)
140 {
141 atomic_inc(&plat_env_count);
142 }
143
144 static void cnss_dec_plat_env_count(void)
145 {
146 atomic_dec(&plat_env_count);
147 }
148
149 static int cnss_get_plat_env_count(void)
150 {
151 return atomic_read(&plat_env_count);
152 }
153
154 int cnss_get_max_plat_env_count(void)
155 {
156 return CNSS_MAX_DEV_NUM;
157 }
158
159 static void cnss_set_plat_priv(struct platform_device *plat_dev,
160 struct cnss_plat_data *plat_priv)
161 {
162 int env_count = cnss_get_plat_env_count();
163
164 cnss_pr_dbg("Set plat_priv at %d", env_count);
165 if (plat_priv) {
166 plat_priv->plat_idx = env_count;
167 plat_env[plat_priv->plat_idx] = plat_priv;
168 cnss_inc_plat_env_count();
169 }
170 }
171
172 struct cnss_plat_data *cnss_get_plat_priv(struct platform_device
173 *plat_dev)
174 {
175 int i;
176
177 if (!plat_dev)
178 return NULL;
179
180 for (i = 0; i < CNSS_MAX_DEV_NUM; i++) {
181 if (plat_env[i] && plat_env[i]->plat_dev == plat_dev)
182 return plat_env[i];
183 }
184 return NULL;
185 }
186
187 struct cnss_plat_data *cnss_get_first_plat_priv(struct platform_device
188 *plat_dev)
189 {
190 int i;
191
192 if (!plat_dev) {
193 for (i = 0; i < CNSS_MAX_DEV_NUM; i++) {
194 if (plat_env[i])
195 return plat_env[i];
196 }
197 }
198 return NULL;
199 }
200
201 static void cnss_clear_plat_priv(struct cnss_plat_data *plat_priv)
202 {
203 cnss_pr_dbg("Clear plat_priv at %d", plat_priv->plat_idx);
204 plat_env[plat_priv->plat_idx] = NULL;
205 cnss_dec_plat_env_count();
206 }
207
208 static int cnss_set_device_name(struct cnss_plat_data *plat_priv)
209 {
210 snprintf(plat_priv->device_name, sizeof(plat_priv->device_name),
211 "wlan_%d", plat_priv->plat_idx);
212
213 return 0;
214 }
215
216 static int cnss_plat_env_available(void)
217 {
218 int ret = 0;
219 int env_count = cnss_get_plat_env_count();
220
221 if (env_count >= CNSS_MAX_DEV_NUM) {
222 cnss_pr_err("ERROR: No space to store plat_priv\n");
223 ret = -ENOMEM;
224 }
225 return ret;
226 }
227
228 struct cnss_plat_data *cnss_get_plat_env(int index)
229 {
230 return plat_env[index];
231 }
232
233 struct cnss_plat_data *cnss_get_plat_priv_by_rc_num(int rc_num)
234 {
235 int i;
236
237 for (i = 0; i < CNSS_MAX_DEV_NUM; i++) {
238 if (plat_env[i] && plat_env[i]->rc_num == rc_num)
239 return plat_env[i];
240 }
241 return NULL;
242 }
243
244 static inline int
245 cnss_get_qrtr_node_id(struct cnss_plat_data *plat_priv)
246 {
247 return of_property_read_u32(plat_priv->dev_node,
248 "qcom,qrtr_node_id", &plat_priv->qrtr_node_id);
249 }
250
251 void cnss_get_qrtr_info(struct cnss_plat_data *plat_priv)
252 {
253 int ret = 0;
254
255 ret = cnss_get_qrtr_node_id(plat_priv);
256 if (ret) {
257 cnss_pr_warn("Failed to find qrtr_node_id err=%d\n", ret);
258 plat_priv->qrtr_node_id = 0;
259 plat_priv->wlfw_service_instance_id = 0;
260 } else {
261 plat_priv->wlfw_service_instance_id = plat_priv->qrtr_node_id +
262 QRTR_NODE_FW_ID_BASE;
263 cnss_pr_dbg("service_instance_id=0x%x\n",
264 plat_priv->wlfw_service_instance_id);
265 }
266 }
267
268 static inline int
269 cnss_get_pld_bus_ops_name(struct cnss_plat_data *plat_priv)
270 {
271 return of_property_read_string(plat_priv->plat_dev->dev.of_node,
272 "qcom,pld_bus_ops_name",
273 &plat_priv->pld_bus_ops_name);
274 }
275
276 #else
277 static void cnss_init_plat_env_count(void)
278 {
279 }
280
281 static void cnss_set_plat_priv(struct platform_device *plat_dev,
282 struct cnss_plat_data *plat_priv)
283 {
284 plat_env = plat_priv;
285 }
286
287 struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev)
288 {
289 return plat_env;
290 }
291
292 static void cnss_clear_plat_priv(struct cnss_plat_data *plat_priv)
293 {
294 plat_env = NULL;
295 }
296
297 static int cnss_set_device_name(struct cnss_plat_data *plat_priv)
298 {
299 snprintf(plat_priv->device_name, sizeof(plat_priv->device_name),
300 "wlan");
301 return 0;
302 }
303
304 static int cnss_plat_env_available(void)
305 {
306 return 0;
307 }
308
309 struct cnss_plat_data *cnss_get_plat_priv_by_rc_num(int rc_num)
310 {
311 return cnss_bus_dev_to_plat_priv(NULL);
312 }
313
314 void cnss_get_qrtr_info(struct cnss_plat_data *plat_priv)
315 {
316 }
317
318 static int
319 cnss_get_pld_bus_ops_name(struct cnss_plat_data *plat_priv)
320 {
321 return 0;
322 }
323 #endif
324
325 void cnss_get_sleep_clk_supported(struct cnss_plat_data *plat_priv)
326 {
327 plat_priv->sleep_clk = of_property_read_bool(plat_priv->dev_node,
328 "qcom,sleep-clk-support");
329 cnss_pr_dbg("qcom,sleep-clk-support is %d\n",
330 plat_priv->sleep_clk);
331 }
332
333 void cnss_get_bwscal_info(struct cnss_plat_data *plat_priv)
334 {
335 plat_priv->no_bwscale = of_property_read_bool(plat_priv->dev_node,
336 "qcom,no-bwscale");
337 }
338
339 static inline int
340 cnss_get_rc_num(struct cnss_plat_data *plat_priv)
341 {
342 return of_property_read_u32(plat_priv->plat_dev->dev.of_node,
343 "qcom,wlan-rc-num", &plat_priv->rc_num);
344 }
345
346 bool cnss_is_dual_wlan_enabled(void)
347 {
348 return IS_ENABLED(CONFIG_CNSS_SUPPORT_DUAL_DEV);
349 }
350
351 /**
352 * cnss_get_mem_seg_count - Get segment count of memory
353 * @type: memory type
354 * @seg: segment count
355 *
356 * Return: 0 on success, negative value on failure
357 */
358 int cnss_get_mem_seg_count(enum cnss_remote_mem_type type, u32 *seg)
359 {
360 struct cnss_plat_data *plat_priv;
361
362 plat_priv = cnss_get_plat_priv(NULL);
363 if (!plat_priv)
364 return -ENODEV;
365
366 switch (type) {
367 case CNSS_REMOTE_MEM_TYPE_FW:
368 *seg = plat_priv->fw_mem_seg_len;
369 break;
370 case CNSS_REMOTE_MEM_TYPE_QDSS:
371 *seg = plat_priv->qdss_mem_seg_len;
372 break;
373 default:
374 return -EINVAL;
375 }
376
377 return 0;
378 }
379 EXPORT_SYMBOL(cnss_get_mem_seg_count);
380
381 /**
382 * cnss_get_wifi_kobj - return wifi kobject
383 * Return: wifi kobject, or NULL to maintain driver compatibility
384 */
385 struct kobject *cnss_get_wifi_kobj(struct device *dev)
386 {
387 struct cnss_plat_data *plat_priv;
388
389 plat_priv = cnss_get_plat_priv(NULL);
390 if (!plat_priv)
391 return NULL;
392
393 return plat_priv->wifi_kobj;
394 }
395 EXPORT_SYMBOL(cnss_get_wifi_kobj);
396
397 /**
398 * cnss_get_mem_segment_info - Get memory info of different type
399 * @type: memory type
400 * @segment: array to save the segment info
401 * @seg: segment count
402 *
403 * Return: 0 on success, negative value on failure
404 */
405 int cnss_get_mem_segment_info(enum cnss_remote_mem_type type,
406 struct cnss_mem_segment segment[],
407 u32 segment_count)
408 {
409 struct cnss_plat_data *plat_priv;
410 u32 i;
411
412 plat_priv = cnss_get_plat_priv(NULL);
413 if (!plat_priv)
414 return -ENODEV;
415
416 switch (type) {
417 case CNSS_REMOTE_MEM_TYPE_FW:
418 if (segment_count > plat_priv->fw_mem_seg_len)
419 segment_count = plat_priv->fw_mem_seg_len;
420 for (i = 0; i < segment_count; i++) {
421 segment[i].size = plat_priv->fw_mem[i].size;
422 segment[i].va = plat_priv->fw_mem[i].va;
423 segment[i].pa = plat_priv->fw_mem[i].pa;
424 }
425 break;
426 case CNSS_REMOTE_MEM_TYPE_QDSS:
427 if (segment_count > plat_priv->qdss_mem_seg_len)
428 segment_count = plat_priv->qdss_mem_seg_len;
429 for (i = 0; i < segment_count; i++) {
430 segment[i].size = plat_priv->qdss_mem[i].size;
431 segment[i].va = plat_priv->qdss_mem[i].va;
432 segment[i].pa = plat_priv->qdss_mem[i].pa;
433 }
434 break;
435 default:
436 return -EINVAL;
437 }
438
439 return 0;
440 }
441 EXPORT_SYMBOL(cnss_get_mem_segment_info);
442
443 static int cnss_get_audio_iommu_domain(struct cnss_plat_data *plat_priv)
444 {
445 struct device_node *audio_ion_node;
446 struct platform_device *audio_ion_pdev;
447
448 audio_ion_node = of_find_compatible_node(NULL, NULL,
449 "qcom,msm-audio-ion");
450 if (!audio_ion_node) {
451 cnss_pr_err("Unable to get Audio ion node");
452 return -EINVAL;
453 }
454
455 audio_ion_pdev = of_find_device_by_node(audio_ion_node);
456 of_node_put(audio_ion_node);
457 if (!audio_ion_pdev) {
458 cnss_pr_err("Unable to get Audio ion platform device");
459 return -EINVAL;
460 }
461
462 plat_priv->audio_iommu_domain =
463 iommu_get_domain_for_dev(&audio_ion_pdev->dev);
464 put_device(&audio_ion_pdev->dev);
465 if (!plat_priv->audio_iommu_domain) {
466 cnss_pr_err("Unable to get Audio ion iommu domain");
467 return -EINVAL;
468 }
469
470 return 0;
471 }
472
473 bool cnss_get_audio_shared_iommu_group_cap(struct device *dev)
474 {
475 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
476 struct device_node *audio_ion_node;
477 struct device_node *cnss_iommu_group_node;
478 struct device_node *audio_iommu_group_node;
479
480 if (!plat_priv)
481 return false;
482
483 audio_ion_node = of_find_compatible_node(NULL, NULL,
484 "qcom,msm-audio-ion");
485 if (!audio_ion_node) {
486 cnss_pr_err("Unable to get Audio ion node");
487 return false;
488 }
489
490 audio_iommu_group_node = of_parse_phandle(audio_ion_node,
491 "qcom,iommu-group", 0);
492 of_node_put(audio_ion_node);
493 if (!audio_iommu_group_node) {
494 cnss_pr_err("Unable to get audio iommu group phandle");
495 return false;
496 }
497 of_node_put(audio_iommu_group_node);
498
499 cnss_iommu_group_node = of_parse_phandle(dev->of_node,
500 "qcom,iommu-group", 0);
501 if (!cnss_iommu_group_node) {
502 cnss_pr_err("Unable to get cnss iommu group phandle");
503 return false;
504 }
505 of_node_put(cnss_iommu_group_node);
506
507 if (cnss_iommu_group_node == audio_iommu_group_node) {
508 plat_priv->is_audio_shared_iommu_group = true;
509 cnss_pr_info("CNSS and Audio share IOMMU group");
510 } else {
511 cnss_pr_info("CNSS and Audio do not share IOMMU group");
512 }
513
514 return plat_priv->is_audio_shared_iommu_group;
515 }
516 EXPORT_SYMBOL(cnss_get_audio_shared_iommu_group_cap);
517
518 int cnss_set_feature_list(struct cnss_plat_data *plat_priv,
519 enum cnss_feature_v01 feature)
520 {
521 if (unlikely(!plat_priv || feature >= CNSS_MAX_FEATURE_V01))
522 return -EINVAL;
523
524 plat_priv->feature_list |= 1 << feature;
525 return 0;
526 }
527
528 int cnss_clear_feature_list(struct cnss_plat_data *plat_priv,
529 enum cnss_feature_v01 feature)
530 {
531 if (unlikely(!plat_priv || feature >= CNSS_MAX_FEATURE_V01))
532 return -EINVAL;
533
534 plat_priv->feature_list &= ~(1 << feature);
535 return 0;
536 }
537
538 int cnss_get_feature_list(struct cnss_plat_data *plat_priv,
539 u64 *feature_list)
540 {
541 if (unlikely(!plat_priv))
542 return -EINVAL;
543
544 *feature_list = plat_priv->feature_list;
545 return 0;
546 }
547
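/**
 * cnss_get_platform_name - Copy the DT root "model" string into a buffer
 * @plat_priv: Platform driver context
 * @buf: Destination buffer
 * @buf_len: Size of the destination buffer
 *
 * The copy is only done when the "platform-name-required" DT property is
 * present on the platform device node.
 *
 * Return: Length of the copied name, 0 if not available
 */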
548 size_t cnss_get_platform_name(struct cnss_plat_data *plat_priv,
549 char *buf, const size_t buf_len)
550 {
551 if (unlikely(!plat_priv || !buf || !buf_len))
552 return 0;
553
554 if (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
555 "platform-name-required")) {
556 struct device_node *root;
557
558 root = of_find_node_by_path("/");
559 if (root) {
560 const char *model;
561 size_t model_len;
562
563 model = of_get_property(root, "model", NULL);
564 if (model) {
565 model_len = strlcpy(buf, model, buf_len);
566 cnss_pr_dbg("Platform name: %s (%zu)\n",
567 buf, model_len);
568
569 return model_len;
570 }
571 }
572 }
573
574 return 0;
575 }
576
577 void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv)
578 {
579 if (atomic_inc_return(&plat_priv->pm_count) != 1)
580 return;
581
582 cnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n",
583 plat_priv->driver_state,
584 atomic_read(&plat_priv->pm_count));
585 pm_stay_awake(&plat_priv->plat_dev->dev);
586 }
587
588 void cnss_pm_relax(struct cnss_plat_data *plat_priv)
589 {
590 int r = atomic_dec_return(&plat_priv->pm_count);
591
592 WARN_ON(r < 0);
593
594 if (r != 0)
595 return;
596
597 cnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n",
598 plat_priv->driver_state,
599 atomic_read(&plat_priv->pm_count));
600 pm_relax(&plat_priv->plat_dev->dev);
601 }
602
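/**
 * cnss_get_fw_files_for_target - Select FW file names for a target
 * @dev: Device
 * @pfw_files: Buffer to hold the selected file name set
 * @target_type: Target type
 * @target_version: Target version
 *
 * QCA6174 rev 3.x targets get the 3.0 file set; any other version falls
 * back to the default file names and the unknown version is logged.
 *
 * Return: 0 on success, negative value on failure
 */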
603 int cnss_get_fw_files_for_target(struct device *dev,
604 struct cnss_fw_files *pfw_files,
605 u32 target_type, u32 target_version)
606 {
607 if (!pfw_files)
608 return -ENODEV;
609
610 switch (target_version) {
611 case QCA6174_REV3_VERSION:
612 case QCA6174_REV3_2_VERSION:
613 memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
614 break;
615 default:
616 memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
617 cnss_pr_err("Unknown target version, type: 0x%X, version: 0x%X",
618 target_type, target_version);
619 break;
620 }
621
622 return 0;
623 }
624 EXPORT_SYMBOL(cnss_get_fw_files_for_target);
625
626 int cnss_get_platform_cap(struct device *dev, struct cnss_platform_cap *cap)
627 {
628 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
629
630 if (!plat_priv)
631 return -ENODEV;
632
633 if (!cap)
634 return -EINVAL;
635
636 *cap = plat_priv->cap;
637 cnss_pr_dbg("Platform cap_flag is 0x%x\n", cap->cap_flag);
638
639 return 0;
640 }
641 EXPORT_SYMBOL(cnss_get_platform_cap);
642
643 /**
644 * cnss_get_fw_cap - Check whether FW supports specific capability or not
645 * @dev: Device
646 * @fw_cap: FW Capability which needs to be checked
647 *
648 * Return: TRUE if supported, FALSE on failure or if not supported
649 */
650 bool cnss_get_fw_cap(struct device *dev, enum cnss_fw_caps fw_cap)
651 {
652 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
653 bool is_supported = false;
654
655 if (!plat_priv)
656 return is_supported;
657
658 if (!plat_priv->fw_caps)
659 return is_supported;
660
661 switch (fw_cap) {
662 case CNSS_FW_CAP_DIRECT_LINK_SUPPORT:
663 is_supported = !!(plat_priv->fw_caps &
664 QMI_WLFW_DIRECT_LINK_SUPPORT_V01);
665 break;
666 case CNSS_FW_CAP_CALDB_SEG_DDR_SUPPORT:
667 is_supported = !!(plat_priv->fw_caps &
668 QMI_WLFW_CALDB_SEG_DDR_SUPPORT_V01);
669 break;
670 default:
671 cnss_pr_err("Invalid FW Capability: 0x%x\n", fw_cap);
672 }
673
674 cnss_pr_dbg("FW Capability 0x%x is %s\n", fw_cap,
675 is_supported ? "supported" : "not supported");
676 return is_supported;
677 }
678 EXPORT_SYMBOL(cnss_get_fw_cap);
679
680 /**
681 * cnss_audio_is_direct_link_supported - Check whether Audio can be used for direct link support
682 * @dev: Device
683 *
684 * Return: TRUE if supported, FALSE on failure or if not supported
685 */
686 bool cnss_audio_is_direct_link_supported(struct device *dev)
687 {
688 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
689 bool is_supported = false;
690
691 if (!plat_priv) {
692 cnss_pr_err("plat_priv not available to check audio direct link cap\n");
693 return is_supported;
694 }
695
696 if (cnss_get_audio_iommu_domain(plat_priv) == 0)
697 is_supported = true;
698
699 return is_supported;
700 }
701 EXPORT_SYMBOL(cnss_audio_is_direct_link_supported);
702
703
704 void cnss_request_pm_qos(struct device *dev, u32 qos_val)
705 {
706 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
707
708 if (!plat_priv)
709 return;
710
711 cpu_latency_qos_add_request(&plat_priv->qos_request, qos_val);
712 }
713 EXPORT_SYMBOL(cnss_request_pm_qos);
714
715 void cnss_remove_pm_qos(struct device *dev)
716 {
717 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
718
719 if (!plat_priv)
720 return;
721
722 cpu_latency_qos_remove_request(&plat_priv->qos_request);
723 }
724 EXPORT_SYMBOL(cnss_remove_pm_qos);
725
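/**
 * cnss_wlan_enable - Send WLAN config and driver mode to firmware
 * @dev: Device
 * @config: WLAN enable configuration
 * @mode: Driver mode to set
 * @host_version: Host driver version string
 *
 * Skipped for QCA6174 and when QMI is bypassed. For WALTEST/CCPM modes only
 * the mode request is sent; otherwise the WLAN config is sent first.
 *
 * Return: 0 on success, negative value on failure
 */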
726 int cnss_wlan_enable(struct device *dev,
727 struct cnss_wlan_enable_cfg *config,
728 enum cnss_driver_mode mode,
729 const char *host_version)
730 {
731 int ret = 0;
732 struct cnss_plat_data *plat_priv;
733
734 if (!dev) {
735 cnss_pr_err("Invalid dev pointer\n");
736 return -EINVAL;
737 }
738
739 plat_priv = cnss_bus_dev_to_plat_priv(dev);
740 if (!plat_priv)
741 return -ENODEV;
742
743 if (plat_priv->device_id == QCA6174_DEVICE_ID)
744 return 0;
745
746 if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks))
747 return 0;
748
749 if (!config || !host_version) {
750 cnss_pr_err("Invalid config or host_version pointer\n");
751 return -EINVAL;
752 }
753
754 cnss_pr_dbg("Mode: %d, config: %pK, host_version: %s\n",
755 mode, config, host_version);
756
757 if (mode == CNSS_WALTEST || mode == CNSS_CCPM)
758 goto skip_cfg;
759
760 if (plat_priv->device_id == QCN7605_DEVICE_ID)
761 config->send_msi_ce = true;
762
763 ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, config, host_version);
764 if (ret)
765 goto out;
766
767 skip_cfg:
768 ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, mode);
769 out:
770 return ret;
771 }
772 EXPORT_SYMBOL(cnss_wlan_enable);
773
774 int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode)
775 {
776 int ret = 0;
777 struct cnss_plat_data *plat_priv;
778
779 if (!dev) {
780 cnss_pr_err("Invalid dev pointer\n");
781 return -EINVAL;
782 }
783
784 plat_priv = cnss_bus_dev_to_plat_priv(dev);
785 if (!plat_priv)
786 return -ENODEV;
787
788 if (plat_priv->device_id == QCA6174_DEVICE_ID)
789 return 0;
790
791 if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks))
792 return 0;
793
794 ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
795 cnss_bus_free_qdss_mem(plat_priv);
796
797 return ret;
798 }
799 EXPORT_SYMBOL(cnss_wlan_disable);
800
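/* iommu_map() takes a GFP argument on newer kernels; wrap it so callers stay
 * kernel-version agnostic.
 */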
801 #if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
802 int cnss_iommu_map(struct iommu_domain *domain,
803 unsigned long iova, phys_addr_t paddr, size_t size, int prot)
804 {
805 return iommu_map(domain, iova, paddr, size, prot);
806 }
807 #else
808 int cnss_iommu_map(struct iommu_domain *domain,
809 unsigned long iova, phys_addr_t paddr, size_t size, int prot)
810 {
811 return iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
812 }
813 #endif
814
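/**
 * cnss_audio_smmu_map - Map a buffer into the audio IOMMU domain
 * @dev: Device
 * @paddr: Physical address of the buffer
 * @iova: IO virtual address to map at
 * @size: Size of the mapping
 *
 * The mapping is page aligned: IOVA and physical address are rounded down to
 * a page boundary and the size is rounded up accordingly. This is a no-op
 * when CNSS and audio share the same IOMMU group.
 *
 * Return: 0 on success, negative value on failure
 */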
815 int cnss_audio_smmu_map(struct device *dev, phys_addr_t paddr,
816 dma_addr_t iova, size_t size)
817 {
818 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
819 uint32_t page_offset;
820
821 if (!plat_priv)
822 return -ENODEV;
823
824 if (!plat_priv->audio_iommu_domain)
825 return -EINVAL;
826
827 if (plat_priv->is_audio_shared_iommu_group)
828 return 0;
829
830 page_offset = iova & (PAGE_SIZE - 1);
831 if (page_offset + size > PAGE_SIZE)
832 size += PAGE_SIZE;
833
834 iova -= page_offset;
835 paddr -= page_offset;
836
837 return cnss_iommu_map(plat_priv->audio_iommu_domain, iova, paddr,
838 roundup(size, PAGE_SIZE), IOMMU_READ |
839 IOMMU_WRITE | IOMMU_CACHE);
840 }
841 EXPORT_SYMBOL(cnss_audio_smmu_map);
842
843 void cnss_audio_smmu_unmap(struct device *dev, dma_addr_t iova, size_t size)
844 {
845 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
846 uint32_t page_offset;
847
848 if (!plat_priv || !plat_priv->audio_iommu_domain ||
849 plat_priv->is_audio_shared_iommu_group)
850 return;
851
852 page_offset = iova & (PAGE_SIZE - 1);
853 if (page_offset + size > PAGE_SIZE)
854 size += PAGE_SIZE;
855
856 iova -= page_offset;
857
858 iommu_unmap(plat_priv->audio_iommu_domain, iova,
859 roundup(size, PAGE_SIZE));
860 }
861 EXPORT_SYMBOL(cnss_audio_smmu_unmap);
862
863 int cnss_get_fw_lpass_shared_mem(struct device *dev, dma_addr_t *iova,
864 size_t *size)
865 {
866 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
867 uint8_t i;
868
869 if (!plat_priv)
870 return -EINVAL;
871
872 for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
873 if (plat_priv->fw_mem[i].type ==
874 QMI_WLFW_MEM_LPASS_SHARED_V01) {
875 *iova = plat_priv->fw_mem[i].pa;
876 *size = plat_priv->fw_mem[i].size;
877 return 0;
878 }
879 }
880
881 return -EINVAL;
882 }
883 EXPORT_SYMBOL(cnss_get_fw_lpass_shared_mem);
884
885 int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
886 u32 data_len, u8 *output)
887 {
888 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
889 int ret = 0;
890
891 if (!plat_priv) {
892 cnss_pr_err("plat_priv is NULL!\n");
893 return -EINVAL;
894 }
895
896 if (plat_priv->device_id == QCA6174_DEVICE_ID)
897 return 0;
898
899 if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
900 cnss_pr_err("Invalid state for athdiag read: 0x%lx\n",
901 plat_priv->driver_state);
902 ret = -EINVAL;
903 goto out;
904 }
905
906 ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, offset, mem_type,
907 data_len, output);
908
909 out:
910 return ret;
911 }
912 EXPORT_SYMBOL(cnss_athdiag_read);
913
914 int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
915 u32 data_len, u8 *input)
916 {
917 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
918 int ret = 0;
919
920 if (!plat_priv) {
921 cnss_pr_err("plat_priv is NULL!\n");
922 return -EINVAL;
923 }
924
925 if (plat_priv->device_id == QCA6174_DEVICE_ID)
926 return 0;
927
928 if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
929 cnss_pr_err("Invalid state for athdiag write: 0x%lx\n",
930 plat_priv->driver_state);
931 ret = -EINVAL;
932 goto out;
933 }
934
935 ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, offset, mem_type,
936 data_len, input);
937
938 out:
939 return ret;
940 }
941 EXPORT_SYMBOL(cnss_athdiag_write);
942
943 int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
944 {
945 struct cnss_plat_data *plat_priv;
946
947 if (!dev) {
948 cnss_pr_err("Invalid dev pointer\n");
949 return -EINVAL;
950 }
951
952 plat_priv = cnss_bus_dev_to_plat_priv(dev);
953 if (!plat_priv)
954 return -ENODEV;
955
956 if (plat_priv->device_id == QCA6174_DEVICE_ID)
957 return 0;
958
959 return cnss_wlfw_ini_send_sync(plat_priv, fw_log_mode);
960 }
961 EXPORT_SYMBOL(cnss_set_fw_log_mode);
962
963 int cnss_set_pcie_gen_speed(struct device *dev, u8 pcie_gen_speed)
964 {
965 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
966
967 if (!plat_priv)
968 return -EINVAL;
969
970 if (!plat_priv->fw_pcie_gen_switch) {
971 cnss_pr_err("Firmware does not support PCIe gen switch\n");
972 return -EOPNOTSUPP;
973 }
974
975 if (pcie_gen_speed < QMI_PCIE_GEN_SPEED_1_V01 ||
976 pcie_gen_speed > QMI_PCIE_GEN_SPEED_3_V01)
977 return -EINVAL;
978
979 cnss_pr_dbg("WLAN provided PCIE gen speed: %d\n", pcie_gen_speed);
980 plat_priv->pcie_gen_speed = pcie_gen_speed;
981 return 0;
982 }
983 EXPORT_SYMBOL(cnss_set_pcie_gen_speed);
984
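/* AUX microcontroller image download applies only to targets (currently
 * Peach) whose firmware advertises aux uc support.
 */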
985 static bool cnss_is_aux_support_enabled(struct cnss_plat_data *plat_priv)
986 {
987 switch (plat_priv->device_id) {
988 case PEACH_DEVICE_ID:
989 if (!plat_priv->fw_aux_uc_support) {
990 cnss_pr_dbg("FW does not support aux uc capability\n");
991 return false;
992 }
993 break;
994 default:
995 cnss_pr_dbg("Host does not support aux uc capability\n");
996 return false;
997 }
998
999 return true;
1000 }
1001
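/**
 * cnss_fw_mem_ready_hdlr - Handle FW memory ready event
 * @plat_priv: Platform driver context
 *
 * Once FW memory is ready, query target capabilities and push the TME patch,
 * HDS/REGDB/BDF files, M3 image, optional AUX image and QDSS config down to
 * firmware in sequence.
 *
 * Return: 0 on success, negative value on failure
 */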
1002 static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
1003 {
1004 int ret = 0;
1005
1006 if (!plat_priv)
1007 return -ENODEV;
1008
1009 set_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
1010
1011 ret = cnss_wlfw_tgt_cap_send_sync(plat_priv);
1012 if (ret)
1013 goto out;
1014
1015 cnss_bus_load_tme_patch(plat_priv);
1016
1017 cnss_wlfw_tme_patch_dnld_send_sync(plat_priv,
1018 WLFW_TME_LITE_PATCH_FILE_V01);
1019
1020 if (plat_priv->hds_enabled)
1021 cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_HDS);
1022
1023 cnss_wlfw_bdf_dnld_send_sync(plat_priv, CNSS_BDF_REGDB);
1024
1025 if (plat_priv->device_id == QCN7605_DEVICE_ID)
1026 plat_priv->ctrl_params.bdf_type = CNSS_BDF_BIN;
1027
1028 ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv,
1029 plat_priv->ctrl_params.bdf_type);
1030 if (ret)
1031 goto out;
1032
1033 if (plat_priv->device_id == QCN7605_DEVICE_ID)
1034 return 0;
1035
1036 ret = cnss_bus_load_m3(plat_priv);
1037 if (ret)
1038 goto out;
1039
1040 ret = cnss_wlfw_m3_dnld_send_sync(plat_priv);
1041 if (ret)
1042 goto out;
1043
1044 if (cnss_is_aux_support_enabled(plat_priv)) {
1045 ret = cnss_bus_load_aux(plat_priv);
1046 if (ret)
1047 goto out;
1048
1049 ret = cnss_wlfw_aux_dnld_send_sync(plat_priv);
1050 if (ret)
1051 goto out;
1052 }
1053
1054 cnss_wlfw_qdss_dnld_send_sync(plat_priv);
1055
1056 return 0;
1057 out:
1058 return ret;
1059 }
1060
1061 static int cnss_request_antenna_sharing(struct cnss_plat_data *plat_priv)
1062 {
1063 int ret = 0;
1064
1065 if (!plat_priv->antenna) {
1066 ret = cnss_wlfw_antenna_switch_send_sync(plat_priv);
1067 if (ret)
1068 goto out;
1069 }
1070
1071 if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state)) {
1072 ret = coex_antenna_switch_to_wlan_send_sync_msg(plat_priv);
1073 if (ret)
1074 goto out;
1075 }
1076
1077 ret = cnss_wlfw_antenna_grant_send_sync(plat_priv);
1078 if (ret)
1079 goto out;
1080
1081 return 0;
1082
1083 out:
1084 return ret;
1085 }
1086
1087 static void cnss_release_antenna_sharing(struct cnss_plat_data *plat_priv)
1088 {
1089 if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state))
1090 coex_antenna_switch_to_mdm_send_sync_msg(plat_priv);
1091 }
1092
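/**
 * cnss_setup_dms_mac - Fetch MAC address from DMS and pass it to firmware
 * @plat_priv: Platform driver context
 *
 * When use-nv-mac is set in DT, the MAC address must come from DMS, so the
 * query is retried and an assert is raised if it still cannot be obtained.
 *
 * Return: 0 on success, negative value on failure
 */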
1093 static int cnss_setup_dms_mac(struct cnss_plat_data *plat_priv)
1094 {
1095 u32 i;
1096 int ret = 0;
1097 struct cnss_plat_ipc_daemon_config *cfg;
1098
1099 ret = cnss_qmi_get_dms_mac(plat_priv);
1100 if (ret == 0 && plat_priv->dms.mac_valid)
1101 goto qmi_send;
1102
1103 /* The DTSI property use-nv-mac forces use of the DMS MAC for WLAN, so
1104 * assert if the MAC cannot be obtained from DMS even after retries.
1105 */
1106 if (plat_priv->use_nv_mac) {
1107 /* Check if Daemon says platform support DMS MAC provisioning */
1108 cfg = cnss_plat_ipc_qmi_daemon_config();
1109 if (cfg) {
1110 if (!cfg->dms_mac_addr_supported) {
1111 cnss_pr_err("DMS MAC address not supported\n");
1112 CNSS_ASSERT(0);
1113 return -EINVAL;
1114 }
1115 }
1116 for (i = 0; i < CNSS_DMS_QMI_CONNECTION_WAIT_RETRY; i++) {
1117 if (plat_priv->dms.mac_valid)
1118 break;
1119
1120 ret = cnss_qmi_get_dms_mac(plat_priv);
1121 if (ret == 0)
1122 break;
1123 msleep(CNSS_DMS_QMI_CONNECTION_WAIT_MS);
1124 }
1125 if (!plat_priv->dms.mac_valid) {
1126 cnss_pr_err("Unable to get MAC from DMS after retries\n");
1127 CNSS_ASSERT(0);
1128 return -EINVAL;
1129 }
1130 }
1131 qmi_send:
1132 if (plat_priv->dms.mac_valid)
1133 ret =
1134 cnss_wlfw_wlan_mac_req_send_sync(plat_priv, plat_priv->dms.mac,
1135 ARRAY_SIZE(plat_priv->dms.mac));
1136
1137 return ret;
1138 }
1139
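/**
 * cnss_cal_db_mem_update - Copy CAL DB file to/from the CAL memory given to FW
 * @plat_priv: Platform driver context
 * @op: CNSS_CAL_DB_UPLOAD or CNSS_CAL_DB_DOWNLOAD
 * @size: In/out size of the calibration data
 *
 * Uses the CNSS daemon IPC QMI interface for the file transfer and waits for
 * the daemon connection if it is not yet established.
 */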
1140 static int cnss_cal_db_mem_update(struct cnss_plat_data *plat_priv,
1141 enum cnss_cal_db_op op, u32 *size)
1142 {
1143 int ret = 0;
1144 u32 timeout = cnss_get_timeout(plat_priv,
1145 CNSS_TIMEOUT_DAEMON_CONNECTION);
1146 enum cnss_plat_ipc_qmi_client_id_v01 client_id =
1147 CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01;
1148
1149 if (op >= CNSS_CAL_DB_INVALID_OP)
1150 return -EINVAL;
1151
1152 if (!plat_priv->cbc_file_download) {
1153 cnss_pr_info("CAL DB file not required as per BDF\n");
1154 return 0;
1155 }
1156 if (*size == 0) {
1157 cnss_pr_err("Invalid cal file size\n");
1158 return -EINVAL;
1159 }
1160 if (!test_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state)) {
1161 cnss_pr_info("Waiting for CNSS Daemon connection\n");
1162 ret = wait_for_completion_timeout(&plat_priv->daemon_connected,
1163 msecs_to_jiffies(timeout));
1164 if (!ret) {
1165 cnss_pr_err("Daemon not yet connected\n");
1166 CNSS_ASSERT(0);
1167 return ret;
1168 }
1169 }
1170 if (!plat_priv->cal_mem->va) {
1171 cnss_pr_err("CAL DB Memory not setup for FW\n");
1172 return -EINVAL;
1173 }
1174
1175 /* Copy CAL DB file contents to/from CAL_TYPE_DDR mem allocated to FW */
1176 if (op == CNSS_CAL_DB_DOWNLOAD) {
1177 cnss_pr_dbg("Initiating Calibration file download to mem\n");
1178 ret = cnss_plat_ipc_qmi_file_download(client_id,
1179 CNSS_CAL_DB_FILE_NAME,
1180 plat_priv->cal_mem->va,
1181 size);
1182 } else {
1183 cnss_pr_dbg("Initiating Calibration mem upload to file\n");
1184 ret = cnss_plat_ipc_qmi_file_upload(client_id,
1185 CNSS_CAL_DB_FILE_NAME,
1186 plat_priv->cal_mem->va,
1187 *size);
1188 }
1189
1190 if (ret)
1191 cnss_pr_err("Cal DB file %s %s failure\n",
1192 CNSS_CAL_DB_FILE_NAME,
1193 op == CNSS_CAL_DB_DOWNLOAD ? "download" : "upload");
1194 else
1195 cnss_pr_dbg("Cal DB file %s %s size %d done\n",
1196 CNSS_CAL_DB_FILE_NAME,
1197 op == CNSS_CAL_DB_DOWNLOAD ? "download" : "upload",
1198 *size);
1199
1200 return ret;
1201 }
1202
1203 static int cnss_cal_mem_upload_to_file(struct cnss_plat_data *plat_priv)
1204 {
1205 if (plat_priv->cal_file_size > plat_priv->cal_mem->size) {
1206 cnss_pr_err("Cal file size is larger than Cal DB Mem size\n");
1207 return -EINVAL;
1208 }
1209 return cnss_cal_db_mem_update(plat_priv, CNSS_CAL_DB_UPLOAD,
1210 &plat_priv->cal_file_size);
1211 }
1212
1213 static int cnss_cal_file_download_to_mem(struct cnss_plat_data *plat_priv,
1214 u32 *cal_file_size)
1215 {
1216 /* To download, pass the total size of the allocated CAL DB memory.
1217 * After the CAL file has been downloaded to memory, its actual size is
1218 * updated in the return pointer.
1219 */
1220 *cal_file_size = plat_priv->cal_mem->size;
1221 return cnss_cal_db_mem_update(plat_priv, CNSS_CAL_DB_DOWNLOAD,
1222 cal_file_size);
1223 }
1224
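/**
 * cnss_fw_ready_hdlr - Handle FW ready event
 * @plat_priv: Platform driver context
 *
 * Marks the firmware as ready, then either enters WALTEST mode, kicks off
 * calibration during cold boot cal, or programs the DMS MAC and probes the
 * WLAN host driver for mission mode.
 *
 * Return: 0 on success, negative value on failure
 */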
1225 static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
1226 {
1227 int ret = 0;
1228 u32 cal_file_size = 0;
1229
1230 if (!plat_priv)
1231 return -ENODEV;
1232
1233 if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
1234 cnss_pr_err("Reboot is in progress, ignore FW ready\n");
1235 return -EINVAL;
1236 }
1237
1238 cnss_pr_dbg("Processing FW Init Done..\n");
1239 del_timer(&plat_priv->fw_boot_timer);
1240 set_bit(CNSS_FW_READY, &plat_priv->driver_state);
1241 clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
1242
1243 cnss_wlfw_send_pcie_gen_speed_sync(plat_priv);
1244 cnss_send_subsys_restart_level_msg(plat_priv);
1245
1246 if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) {
1247 clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
1248 clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
1249 }
1250
1251 if (test_bit(ENABLE_WALTEST, &plat_priv->ctrl_params.quirks)) {
1252 ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
1253 CNSS_WALTEST);
1254 } else if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
1255 cnss_request_antenna_sharing(plat_priv);
1256 cnss_cal_file_download_to_mem(plat_priv, &cal_file_size);
1257 cnss_wlfw_cal_report_req_send_sync(plat_priv, cal_file_size);
1258 plat_priv->cal_time = jiffies;
1259 ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
1260 CNSS_CALIBRATION);
1261 } else {
1262 ret = cnss_setup_dms_mac(plat_priv);
1263 ret = cnss_bus_call_driver_probe(plat_priv);
1264 }
1265
1266 if (ret && test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
1267 goto out;
1268 else if (ret)
1269 goto shutdown;
1270
1271 cnss_vreg_unvote_type(plat_priv, CNSS_VREG_PRIM);
1272
1273 return 0;
1274
1275 shutdown:
1276 cnss_bus_dev_shutdown(plat_priv);
1277
1278 clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
1279 clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
1280
1281 out:
1282 return ret;
1283 }
1284
1285 static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
1286 {
1287 switch (type) {
1288 case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
1289 return "SERVER_ARRIVE";
1290 case CNSS_DRIVER_EVENT_SERVER_EXIT:
1291 return "SERVER_EXIT";
1292 case CNSS_DRIVER_EVENT_REQUEST_MEM:
1293 return "REQUEST_MEM";
1294 case CNSS_DRIVER_EVENT_FW_MEM_READY:
1295 return "FW_MEM_READY";
1296 case CNSS_DRIVER_EVENT_FW_READY:
1297 return "FW_READY";
1298 case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
1299 return "COLD_BOOT_CAL_START";
1300 case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
1301 return "COLD_BOOT_CAL_DONE";
1302 case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
1303 return "REGISTER_DRIVER";
1304 case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
1305 return "UNREGISTER_DRIVER";
1306 case CNSS_DRIVER_EVENT_RECOVERY:
1307 return "RECOVERY";
1308 case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
1309 return "FORCE_FW_ASSERT";
1310 case CNSS_DRIVER_EVENT_POWER_UP:
1311 return "POWER_UP";
1312 case CNSS_DRIVER_EVENT_POWER_DOWN:
1313 return "POWER_DOWN";
1314 case CNSS_DRIVER_EVENT_IDLE_RESTART:
1315 return "IDLE_RESTART";
1316 case CNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
1317 return "IDLE_SHUTDOWN";
1318 case CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
1319 return "IMS_WFC_CALL_IND";
1320 case CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
1321 return "WLFW_TWC_CFG_IND";
1322 case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
1323 return "QDSS_TRACE_REQ_MEM";
1324 case CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE:
1325 return "FW_MEM_FILE_SAVE";
1326 case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
1327 return "QDSS_TRACE_FREE";
1328 case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
1329 return "QDSS_TRACE_REQ_DATA";
1330 case CNSS_DRIVER_EVENT_MAX:
1331 return "EVENT_MAX";
1332 }
1333
1334 return "UNKNOWN";
1335 };
1336
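/**
 * cnss_driver_event_post - Queue a driver event for the event work queue
 * @plat_priv: Platform driver context
 * @type: Event type
 * @flags: CNSS_EVENT_* flags; sync posts block until the event completes
 * @data: Event payload
 *
 * Return: 0 for async posts, the handler result for sync posts, negative
 * value on failure
 */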
1337 int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
1338 enum cnss_driver_event_type type,
1339 u32 flags, void *data)
1340 {
1341 struct cnss_driver_event *event;
1342 unsigned long irq_flags;
1343 int gfp = GFP_KERNEL;
1344 int ret = 0;
1345
1346 if (!plat_priv)
1347 return -ENODEV;
1348
1349 cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx flags: 0x%0x\n",
1350 cnss_driver_event_to_str(type), type,
1351 flags ? "-sync" : "", plat_priv->driver_state, flags);
1352
1353 if (type >= CNSS_DRIVER_EVENT_MAX) {
1354 cnss_pr_err("Invalid Event type: %d, can't post", type);
1355 return -EINVAL;
1356 }
1357
1358 if (in_interrupt() || irqs_disabled())
1359 gfp = GFP_ATOMIC;
1360
1361 event = kzalloc(sizeof(*event), gfp);
1362 if (!event)
1363 return -ENOMEM;
1364
1365 cnss_pm_stay_awake(plat_priv);
1366
1367 event->type = type;
1368 event->data = data;
1369 init_completion(&event->complete);
1370 event->ret = CNSS_EVENT_PENDING;
1371 event->sync = !!(flags & CNSS_EVENT_SYNC);
1372
1373 spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
1374 list_add_tail(&event->list, &plat_priv->event_list);
1375 spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
1376
1377 queue_work(plat_priv->event_wq, &plat_priv->event_work);
1378
1379 if (!(flags & CNSS_EVENT_SYNC))
1380 goto out;
1381
1382 if (flags & CNSS_EVENT_UNKILLABLE)
1383 wait_for_completion(&event->complete);
1384 else if (flags & CNSS_EVENT_UNINTERRUPTIBLE)
1385 ret = wait_for_completion_killable(&event->complete);
1386 else
1387 ret = wait_for_completion_interruptible(&event->complete);
1388
1389 cnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
1390 cnss_driver_event_to_str(type), type,
1391 plat_priv->driver_state, ret, event->ret);
1392 spin_lock_irqsave(&plat_priv->event_lock, irq_flags);
1393 if (ret == -ERESTARTSYS && event->ret == CNSS_EVENT_PENDING) {
1394 event->sync = false;
1395 spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
1396 ret = -EINTR;
1397 goto out;
1398 }
1399 spin_unlock_irqrestore(&plat_priv->event_lock, irq_flags);
1400
1401 ret = event->ret;
1402 kfree(event);
1403
1404 out:
1405 cnss_pm_relax(plat_priv);
1406 return ret;
1407 }
1408
1409 /**
1410 * cnss_get_timeout - Get timeout for corresponding type.
1411 * @plat_priv: Pointer to platform driver context.
1412 * @cnss_timeout_type: Timeout type.
1413 *
1414 * Return: Timeout in milliseconds.
1415 */
1416 unsigned int cnss_get_timeout(struct cnss_plat_data *plat_priv,
1417 enum cnss_timeout_type timeout_type)
1418 {
1419 unsigned int qmi_timeout = cnss_get_qmi_timeout(plat_priv);
1420
1421 switch (timeout_type) {
1422 case CNSS_TIMEOUT_QMI:
1423 return qmi_timeout;
1424 case CNSS_TIMEOUT_POWER_UP:
1425 return (qmi_timeout << 2);
1426 case CNSS_TIMEOUT_IDLE_RESTART:
1427 /* In idle restart power up sequence, we have fw_boot_timer to
1428 * handle FW initialization failure.
1429 * It uses WLAN_MISSION_MODE_TIMEOUT, so allow 3x that time to
1430 * account for FW dump collection and FW re-initialization on
1431 * retry.
1432 */
1433 return (qmi_timeout + WLAN_MISSION_MODE_TIMEOUT * 3);
1434 case CNSS_TIMEOUT_CALIBRATION:
1435 /* Similar to mission mode, FW recovery is attempted if FW init
1436 * fails during CBC. Thus return 2x the CBC timeout.
1437 */
1438 return (qmi_timeout + WLAN_COLD_BOOT_CAL_TIMEOUT * 2);
1439 case CNSS_TIMEOUT_WLAN_WATCHDOG:
1440 return ((qmi_timeout << 1) + WLAN_WD_TIMEOUT_MS);
1441 case CNSS_TIMEOUT_RDDM:
1442 return CNSS_RDDM_TIMEOUT_MS;
1443 case CNSS_TIMEOUT_RECOVERY:
1444 return RECOVERY_TIMEOUT;
1445 case CNSS_TIMEOUT_DAEMON_CONNECTION:
1446 return qmi_timeout + CNSS_DAEMON_CONNECT_TIMEOUT_MS;
1447 default:
1448 return qmi_timeout;
1449 }
1450 }
1451
1452 unsigned int cnss_get_boot_timeout(struct device *dev)
1453 {
1454 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1455
1456 if (!plat_priv) {
1457 cnss_pr_err("plat_priv is NULL\n");
1458 return 0;
1459 }
1460
1461 return cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);
1462 }
1463 EXPORT_SYMBOL(cnss_get_boot_timeout);
1464
1465 int cnss_power_up(struct device *dev)
1466 {
1467 int ret = 0;
1468 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1469 unsigned int timeout;
1470
1471 if (!plat_priv) {
1472 cnss_pr_err("plat_priv is NULL\n");
1473 return -ENODEV;
1474 }
1475
1476 cnss_pr_dbg("Powering up device\n");
1477
1478 ret = cnss_driver_event_post(plat_priv,
1479 CNSS_DRIVER_EVENT_POWER_UP,
1480 CNSS_EVENT_SYNC, NULL);
1481 if (ret)
1482 goto out;
1483
1484 if (plat_priv->device_id == QCA6174_DEVICE_ID)
1485 goto out;
1486
1487 timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_POWER_UP);
1488
1489 reinit_completion(&plat_priv->power_up_complete);
1490 ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
1491 msecs_to_jiffies(timeout));
1492 if (!ret) {
1493 cnss_pr_err("Timeout (%ums) waiting for power up to complete\n",
1494 timeout);
1495 ret = -EAGAIN;
1496 goto out;
1497 }
1498
1499 return 0;
1500
1501 out:
1502 return ret;
1503 }
1504 EXPORT_SYMBOL(cnss_power_up);
1505
1506 int cnss_power_down(struct device *dev)
1507 {
1508 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1509
1510 if (!plat_priv) {
1511 cnss_pr_err("plat_priv is NULL\n");
1512 return -ENODEV;
1513 }
1514
1515 cnss_pr_dbg("Powering down device\n");
1516
1517 return cnss_driver_event_post(plat_priv,
1518 CNSS_DRIVER_EVENT_POWER_DOWN,
1519 CNSS_EVENT_SYNC, NULL);
1520 }
1521 EXPORT_SYMBOL(cnss_power_down);
1522
1523 int cnss_idle_restart(struct device *dev)
1524 {
1525 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1526 unsigned int timeout;
1527 int ret = 0;
1528
1529 if (!plat_priv) {
1530 cnss_pr_err("plat_priv is NULL\n");
1531 return -ENODEV;
1532 }
1533
1534 if (!mutex_trylock(&plat_priv->driver_ops_lock)) {
1535 cnss_pr_dbg("Another driver operation is in progress, ignore idle restart\n");
1536 return -EBUSY;
1537 }
1538
1539 cnss_pr_dbg("Doing idle restart\n");
1540
1541 reinit_completion(&plat_priv->power_up_complete);
1542
1543 if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
1544 cnss_pr_dbg("Reboot or shutdown is in progress, ignore idle restart\n");
1545 ret = -EINVAL;
1546 goto out;
1547 }
1548
1549 ret = cnss_driver_event_post(plat_priv,
1550 CNSS_DRIVER_EVENT_IDLE_RESTART,
1551 CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
1552 if (ret == -EINTR && plat_priv->device_id != QCA6174_DEVICE_ID)
1553 cnss_pr_err("Idle restart has been interrupted but device power up is still in progress");
1554 else if (ret)
1555 goto out;
1556
1557 if (plat_priv->device_id == QCA6174_DEVICE_ID) {
1558 ret = cnss_bus_call_driver_probe(plat_priv);
1559 goto out;
1560 }
1561
1562 timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_IDLE_RESTART);
1563 ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
1564 msecs_to_jiffies(timeout));
1565 if (plat_priv->power_up_error) {
1566 ret = plat_priv->power_up_error;
1567 clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
1568 cnss_pr_dbg("Power up error:%d, exiting\n",
1569 plat_priv->power_up_error);
1570 goto out;
1571 }
1572
1573 if (!ret) {
1574 /* This exception occurs after attempting retry of FW recovery.
1575 * Thus we can safely power off the device.
1576 */
1577 cnss_fatal_err("Timeout (%ums) waiting for idle restart to complete\n",
1578 timeout);
1579 ret = -ETIMEDOUT;
1580 cnss_power_down(dev);
1581 CNSS_ASSERT(0);
1582 goto out;
1583 }
1584
1585 if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
1586 cnss_pr_dbg("Reboot or shutdown is in progress, ignore idle restart\n");
1587 del_timer(&plat_priv->fw_boot_timer);
1588 ret = -EINVAL;
1589 goto out;
1590 }
1591
1592 /* In non-DRV mode, remove MHI satellite configuration. Switching to
1593 * non-DRV is supported only once after device reboots and before wifi
1594 * is turned on. We do not allow switching back to DRV.
1595 * To bring device back into DRV, user needs to reboot device.
1596 */
1597 if (test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks)) {
1598 cnss_pr_dbg("DRV is disabled\n");
1599 cnss_bus_disable_mhi_satellite_cfg(plat_priv);
1600 }
1601
1602 mutex_unlock(&plat_priv->driver_ops_lock);
1603 return 0;
1604
1605 out:
1606 mutex_unlock(&plat_priv->driver_ops_lock);
1607 return ret;
1608 }
1609 EXPORT_SYMBOL(cnss_idle_restart);
1610
1611 int cnss_idle_shutdown(struct device *dev)
1612 {
1613 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
1614
1615 if (!plat_priv) {
1616 cnss_pr_err("plat_priv is NULL\n");
1617 return -ENODEV;
1618 }
1619
1620 if (test_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state)) {
1621 cnss_pr_dbg("System suspend or resume in progress, ignore idle shutdown\n");
1622 return -EAGAIN;
1623 }
1624
1625 cnss_pr_dbg("Doing idle shutdown\n");
1626
1627 if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) ||
1628 test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
1629 cnss_pr_dbg("Recovery in progress. Ignore IDLE Shutdown\n");
1630 return -EBUSY;
1631 }
1632
1633 return cnss_driver_event_post(plat_priv,
1634 CNSS_DRIVER_EVENT_IDLE_SHUTDOWN,
1635 CNSS_EVENT_SYNC_UNINTERRUPTIBLE, NULL);
1636 }
1637 EXPORT_SYMBOL(cnss_idle_shutdown);
1638
1639 static int cnss_get_resources(struct cnss_plat_data *plat_priv)
1640 {
1641 int ret = 0;
1642
1643 ret = cnss_get_vreg_type(plat_priv, CNSS_VREG_PRIM);
1644 if (ret < 0) {
1645 cnss_pr_err("Failed to get vreg, err = %d\n", ret);
1646 goto out;
1647 }
1648
1649 ret = cnss_get_clk(plat_priv);
1650 if (ret) {
1651 cnss_pr_err("Failed to get clocks, err = %d\n", ret);
1652 goto put_vreg;
1653 }
1654
1655 ret = cnss_get_pinctrl(plat_priv);
1656 if (ret) {
1657 cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
1658 goto put_clk;
1659 }
1660
1661 return 0;
1662
1663 put_clk:
1664 cnss_put_clk(plat_priv);
1665 put_vreg:
1666 cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
1667 out:
1668 return ret;
1669 }
1670
1671 static void cnss_put_resources(struct cnss_plat_data *plat_priv)
1672 {
1673 cnss_put_clk(plat_priv);
1674 cnss_put_vreg_type(plat_priv, CNSS_VREG_PRIM);
1675 }
1676
1677 #if IS_ENABLED(CONFIG_ESOC) && IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
1678 static int cnss_modem_notifier_nb(struct notifier_block *nb,
1679 unsigned long code,
1680 void *ss_handle)
1681 {
1682 struct cnss_plat_data *plat_priv =
1683 container_of(nb, struct cnss_plat_data, modem_nb);
1684 struct cnss_esoc_info *esoc_info;
1685
1686 cnss_pr_dbg("Modem notifier: event %lu\n", code);
1687
1688 if (!plat_priv)
1689 return NOTIFY_DONE;
1690
1691 esoc_info = &plat_priv->esoc_info;
1692
1693 if (code == SUBSYS_AFTER_POWERUP)
1694 esoc_info->modem_current_status = 1;
1695 else if (code == SUBSYS_BEFORE_SHUTDOWN)
1696 esoc_info->modem_current_status = 0;
1697 else
1698 return NOTIFY_DONE;
1699
1700 if (!cnss_bus_call_driver_modem_status(plat_priv,
1701 esoc_info->modem_current_status))
1702 return NOTIFY_DONE;
1703
1704 return NOTIFY_OK;
1705 }
1706
1707 static int cnss_register_esoc(struct cnss_plat_data *plat_priv)
1708 {
1709 int ret = 0;
1710 struct device *dev;
1711 struct cnss_esoc_info *esoc_info;
1712 struct esoc_desc *esoc_desc;
1713 const char *client_desc;
1714
1715 dev = &plat_priv->plat_dev->dev;
1716 esoc_info = &plat_priv->esoc_info;
1717
1718 esoc_info->notify_modem_status =
1719 of_property_read_bool(dev->of_node,
1720 "qcom,notify-modem-status");
1721
1722 if (!esoc_info->notify_modem_status)
1723 goto out;
1724
1725 ret = of_property_read_string_index(dev->of_node, "esoc-names", 0,
1726 &client_desc);
1727 if (ret) {
1728 cnss_pr_dbg("esoc-names is not defined in DT, skip!\n");
1729 } else {
1730 esoc_desc = devm_register_esoc_client(dev, client_desc);
1731 if (IS_ERR_OR_NULL(esoc_desc)) {
1732 ret = PTR_RET(esoc_desc);
1733 cnss_pr_err("Failed to register esoc_desc, err = %d\n",
1734 ret);
1735 goto out;
1736 }
1737 esoc_info->esoc_desc = esoc_desc;
1738 }
1739
1740 plat_priv->modem_nb.notifier_call = cnss_modem_notifier_nb;
1741 esoc_info->modem_current_status = 0;
1742 esoc_info->modem_notify_handler =
1743 subsys_notif_register_notifier(esoc_info->esoc_desc ?
1744 esoc_info->esoc_desc->name :
1745 "modem", &plat_priv->modem_nb);
1746 if (IS_ERR(esoc_info->modem_notify_handler)) {
1747 ret = PTR_ERR(esoc_info->modem_notify_handler);
1748 cnss_pr_err("Failed to register esoc notifier, err = %d\n",
1749 ret);
1750 goto unreg_esoc;
1751 }
1752
1753 return 0;
1754 unreg_esoc:
1755 if (esoc_info->esoc_desc)
1756 devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
1757 out:
1758 return ret;
1759 }
1760
1761 static void cnss_unregister_esoc(struct cnss_plat_data *plat_priv)
1762 {
1763 struct device *dev;
1764 struct cnss_esoc_info *esoc_info;
1765
1766 dev = &plat_priv->plat_dev->dev;
1767 esoc_info = &plat_priv->esoc_info;
1768
1769 if (esoc_info->notify_modem_status)
1770 subsys_notif_unregister_notifier
1771 (esoc_info->modem_notify_handler,
1772 &plat_priv->modem_nb);
1773 if (esoc_info->esoc_desc)
1774 devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
1775 }
1776 #else
1777 static inline int cnss_register_esoc(struct cnss_plat_data *plat_priv)
1778 {
1779 return 0;
1780 }
1781
1782 static inline void cnss_unregister_esoc(struct cnss_plat_data *plat_priv) {}
1783 #endif
1784
1785 int cnss_enable_dev_sol_irq(struct cnss_plat_data *plat_priv)
1786 {
1787 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1788 int ret = 0;
1789
1790 if (sol_gpio->dev_sol_gpio < 0 || sol_gpio->dev_sol_irq <= 0)
1791 return 0;
1792
1793 ret = enable_irq_wake(sol_gpio->dev_sol_irq);
1794 if (ret)
1795 cnss_pr_err("Failed to enable device SOL as wake IRQ, err = %d\n",
1796 ret);
1797
1798 return ret;
1799 }
1800
1801 int cnss_disable_dev_sol_irq(struct cnss_plat_data *plat_priv)
1802 {
1803 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1804 int ret = 0;
1805
1806 if (sol_gpio->dev_sol_gpio < 0 || sol_gpio->dev_sol_irq <= 0)
1807 return 0;
1808
1809 ret = disable_irq_wake(sol_gpio->dev_sol_irq);
1810 if (ret)
1811 cnss_pr_err("Failed to disable device SOL as wake IRQ, err = %d\n",
1812 ret);
1813
1814 return ret;
1815 }
1816
1817 int cnss_get_dev_sol_value(struct cnss_plat_data *plat_priv)
1818 {
1819 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1820
1821 if (sol_gpio->dev_sol_gpio < 0)
1822 return -EINVAL;
1823
1824 return gpio_get_value(sol_gpio->dev_sol_gpio);
1825 }
1826
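/* Device SOL GPIO IRQ handler: abort any suspend in progress, wake the
 * system and let the bus layer handle the assertion.
 */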
1827 static irqreturn_t cnss_dev_sol_handler(int irq, void *data)
1828 {
1829 struct cnss_plat_data *plat_priv = data;
1830 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1831
1832 if (test_bit(CNSS_POWER_OFF, &plat_priv->driver_state)) {
1833 cnss_pr_dbg("Ignore Dev SOL during device power off");
1834 return IRQ_HANDLED;
1835 }
1836
1837 sol_gpio->dev_sol_counter++;
1838 cnss_pr_dbg("WLAN device SOL IRQ (%u) is asserted #%u, dev_sol_val: %d\n",
1839 irq, sol_gpio->dev_sol_counter,
1840 cnss_get_dev_sol_value(plat_priv));
1841
1842 /* Make sure any suspend in progress is aborted */
1843 cnss_pm_stay_awake(plat_priv);
1844 cnss_pm_relax(plat_priv);
1845 pm_system_wakeup();
1846
1847 cnss_bus_handle_dev_sol_irq(plat_priv);
1848
1849 return IRQ_HANDLED;
1850 }
1851
1852 static int cnss_init_dev_sol_gpio(struct cnss_plat_data *plat_priv)
1853 {
1854 struct device *dev = &plat_priv->plat_dev->dev;
1855 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1856 int ret = 0;
1857
1858 sol_gpio->dev_sol_gpio = of_get_named_gpio(dev->of_node,
1859 "wlan-dev-sol-gpio", 0);
1860 if (sol_gpio->dev_sol_gpio < 0)
1861 goto out;
1862
1863 cnss_pr_dbg("Get device SOL GPIO (%d) from device node\n",
1864 sol_gpio->dev_sol_gpio);
1865
1866 ret = gpio_request(sol_gpio->dev_sol_gpio, "wlan_dev_sol_gpio");
1867 if (ret) {
1868 cnss_pr_err("Failed to request device SOL GPIO, err = %d\n",
1869 ret);
1870 goto out;
1871 }
1872
1873 gpio_direction_input(sol_gpio->dev_sol_gpio);
1874 sol_gpio->dev_sol_irq = gpio_to_irq(sol_gpio->dev_sol_gpio);
1875
1876 ret = request_irq(sol_gpio->dev_sol_irq, cnss_dev_sol_handler,
1877 IRQF_TRIGGER_FALLING, "wlan_dev_sol_irq", plat_priv);
1878 if (ret) {
1879 cnss_pr_err("Failed to request device SOL IRQ, err = %d\n", ret);
1880 goto free_gpio;
1881 }
1882
1883 return 0;
1884
1885 free_gpio:
1886 gpio_free(sol_gpio->dev_sol_gpio);
1887 out:
1888 return ret;
1889 }
1890
1891 static void cnss_deinit_dev_sol_gpio(struct cnss_plat_data *plat_priv)
1892 {
1893 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1894
1895 if (sol_gpio->dev_sol_gpio < 0)
1896 return;
1897
1898 free_irq(sol_gpio->dev_sol_irq, plat_priv);
1899 gpio_free(sol_gpio->dev_sol_gpio);
1900 }
1901
1902 int cnss_set_host_sol_value(struct cnss_plat_data *plat_priv, int value)
1903 {
1904 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1905
1906 if (sol_gpio->host_sol_gpio < 0)
1907 return -EINVAL;
1908
1909 if (value)
1910 cnss_pr_dbg("Assert host SOL GPIO\n");
1911 gpio_set_value(sol_gpio->host_sol_gpio, value);
1912
1913 return 0;
1914 }
1915
1916 int cnss_get_host_sol_value(struct cnss_plat_data *plat_priv)
1917 {
1918 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1919
1920 if (sol_gpio->host_sol_gpio < 0)
1921 return -EINVAL;
1922
1923 return gpio_get_value(sol_gpio->host_sol_gpio);
1924 }
1925
1926 static int cnss_init_host_sol_gpio(struct cnss_plat_data *plat_priv)
1927 {
1928 struct device *dev = &plat_priv->plat_dev->dev;
1929 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1930 int ret = 0;
1931
1932 sol_gpio->host_sol_gpio = of_get_named_gpio(dev->of_node,
1933 "wlan-host-sol-gpio", 0);
1934 if (sol_gpio->host_sol_gpio < 0)
1935 goto out;
1936
1937 cnss_pr_dbg("Get host SOL GPIO (%d) from device node\n",
1938 sol_gpio->host_sol_gpio);
1939
1940 ret = gpio_request(sol_gpio->host_sol_gpio, "wlan_host_sol_gpio");
1941 if (ret) {
1942 cnss_pr_err("Failed to request host SOL GPIO, err = %d\n",
1943 ret);
1944 goto out;
1945 }
1946
1947 gpio_direction_output(sol_gpio->host_sol_gpio, 0);
1948
1949 return 0;
1950
1951 out:
1952 return ret;
1953 }
1954
1955 static void cnss_deinit_host_sol_gpio(struct cnss_plat_data *plat_priv)
1956 {
1957 struct cnss_sol_gpio *sol_gpio = &plat_priv->sol_gpio;
1958
1959 if (sol_gpio->host_sol_gpio < 0)
1960 return;
1961
1962 gpio_free(sol_gpio->host_sol_gpio);
1963 }
1964
1965 static int cnss_init_sol_gpio(struct cnss_plat_data *plat_priv)
1966 {
1967 int ret;
1968
1969 ret = cnss_init_dev_sol_gpio(plat_priv);
1970 if (ret)
1971 goto out;
1972
1973 ret = cnss_init_host_sol_gpio(plat_priv);
1974 if (ret)
1975 goto deinit_dev_sol;
1976
1977 return 0;
1978
1979 deinit_dev_sol:
1980 cnss_deinit_dev_sol_gpio(plat_priv);
1981 out:
1982 return ret;
1983 }
1984
1985 static void cnss_deinit_sol_gpio(struct cnss_plat_data *plat_priv)
1986 {
1987 cnss_deinit_host_sol_gpio(plat_priv);
1988 cnss_deinit_dev_sol_gpio(plat_priv);
1989 }
1990
1991 #if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
1992 static int cnss_subsys_powerup(const struct subsys_desc *subsys_desc)
1993 {
1994 struct cnss_plat_data *plat_priv;
1995 int ret = 0;
1996
1997 if (!subsys_desc->dev) {
1998 cnss_pr_err("dev from subsys_desc is NULL\n");
1999 return -ENODEV;
2000 }
2001
2002 plat_priv = dev_get_drvdata(subsys_desc->dev);
2003 if (!plat_priv) {
2004 cnss_pr_err("plat_priv is NULL\n");
2005 return -ENODEV;
2006 }
2007
2008 if (!plat_priv->driver_state) {
2009 cnss_pr_dbg("subsys powerup is ignored\n");
2010 return 0;
2011 }
2012
2013 ret = cnss_bus_dev_powerup(plat_priv);
2014 if (ret)
2015 __pm_relax(plat_priv->recovery_ws);
2016 return ret;
2017 }
2018
2019 static int cnss_subsys_shutdown(const struct subsys_desc *subsys_desc,
2020 bool force_stop)
2021 {
2022 struct cnss_plat_data *plat_priv;
2023
2024 if (!subsys_desc->dev) {
2025 cnss_pr_err("dev from subsys_desc is NULL\n");
2026 return -ENODEV;
2027 }
2028
2029 plat_priv = dev_get_drvdata(subsys_desc->dev);
2030 if (!plat_priv) {
2031 cnss_pr_err("plat_priv is NULL\n");
2032 return -ENODEV;
2033 }
2034
2035 if (!plat_priv->driver_state) {
2036 cnss_pr_dbg("subsys shutdown is ignored\n");
2037 return 0;
2038 }
2039
2040 return cnss_bus_dev_shutdown(plat_priv);
2041 }
2042
2043 void cnss_device_crashed(struct device *dev)
2044 {
2045 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2046 struct cnss_subsys_info *subsys_info;
2047
2048 if (!plat_priv)
2049 return;
2050
2051 subsys_info = &plat_priv->subsys_info;
2052 if (subsys_info->subsys_device) {
2053 set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2054 subsys_set_crash_status(subsys_info->subsys_device, true);
2055 subsystem_restart_dev(subsys_info->subsys_device);
2056 }
2057 }
2058 EXPORT_SYMBOL(cnss_device_crashed);
2059
2060 static void cnss_subsys_crash_shutdown(const struct subsys_desc *subsys_desc)
2061 {
2062 struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
2063
2064 if (!plat_priv) {
2065 cnss_pr_err("plat_priv is NULL\n");
2066 return;
2067 }
2068
2069 cnss_bus_dev_crash_shutdown(plat_priv);
2070 }
2071
2072 static int cnss_subsys_ramdump(int enable,
2073 const struct subsys_desc *subsys_desc)
2074 {
2075 struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
2076
2077 if (!plat_priv) {
2078 cnss_pr_err("plat_priv is NULL\n");
2079 return -ENODEV;
2080 }
2081
2082 if (!enable)
2083 return 0;
2084
2085 return cnss_bus_dev_ramdump(plat_priv);
2086 }
2087
2088 static void cnss_recovery_work_handler(struct work_struct *work)
2089 {
2090 }
2091 #else
2092 void cnss_recovery_handler(struct cnss_plat_data *plat_priv)
2093 {
2094 int ret;
2095
2096 set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2097
2098 if (!plat_priv->recovery_enabled)
2099 panic("subsys-restart: Resetting the SoC wlan crashed\n");
2100
2101 cnss_bus_dev_shutdown(plat_priv);
2102 cnss_bus_dev_ramdump(plat_priv);
2103
2104 /* If recovery is triggered before Host driver registration,
2105 * avoid device power up because the device will eventually be
2106 * powered up as part of driver registration.
2107 */
2108 if (!test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state) ||
2109 !test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
2110 cnss_pr_dbg("Host driver not registered yet, ignore Device Power Up, 0x%lx\n",
2111 plat_priv->driver_state);
2112 return;
2113 }
2114
2115 msleep(POWER_RESET_MIN_DELAY_MS);
2116
2117 ret = cnss_bus_dev_powerup(plat_priv);
2118 if (ret) {
2119 __pm_relax(plat_priv->recovery_ws);
2120 clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2121 }
2122
2123 return;
2124 }
2125
2126 static void cnss_recovery_work_handler(struct work_struct *work)
2127 {
2128 struct cnss_plat_data *plat_priv =
2129 container_of(work, struct cnss_plat_data, recovery_work);
2130
2131 cnss_recovery_handler(plat_priv);
2132 }
2133
2134 void cnss_device_crashed(struct device *dev)
2135 {
2136 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2137
2138 if (!plat_priv)
2139 return;
2140
2141 set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2142 schedule_work(&plat_priv->recovery_work);
2143 }
2144 EXPORT_SYMBOL(cnss_device_crashed);
2145 #endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
2146
2147 void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
2148 {
2149 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2150 struct cnss_ramdump_info *ramdump_info;
2151
2152 if (!plat_priv)
2153 return NULL;
2154
2155 ramdump_info = &plat_priv->ramdump_info;
2156 *size = ramdump_info->ramdump_size;
2157
2158 return ramdump_info->ramdump_va;
2159 }
2160 EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
2161
2162 static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason)
2163 {
2164 switch (reason) {
2165 case CNSS_REASON_DEFAULT:
2166 return "DEFAULT";
2167 case CNSS_REASON_LINK_DOWN:
2168 return "LINK_DOWN";
2169 case CNSS_REASON_RDDM:
2170 return "RDDM";
2171 case CNSS_REASON_TIMEOUT:
2172 return "TIMEOUT";
2173 }
2174
2175 return "UNKNOWN";
2176 }
2177
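/* Core recovery routine. QCA6174 always takes the self-recovery path;
 * otherwise a wakeup source is held for the recovery window and the
 * action depends on the reason: link-down recovery is attempted first
 * for CNSS_REASON_LINK_DOWN, dump info is collected for
 * CNSS_REASON_RDDM, and finally the device is marked crashed so the
 * bus layer can restart it.
 */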
2178 static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
2179 enum cnss_recovery_reason reason)
2180 {
2181 int ret;
2182
2183 plat_priv->recovery_count++;
2184
2185 if (plat_priv->device_id == QCA6174_DEVICE_ID)
2186 goto self_recovery;
2187
2188 if (test_bit(SKIP_RECOVERY, &plat_priv->ctrl_params.quirks)) {
2189 cnss_pr_dbg("Skip device recovery\n");
2190 return 0;
2191 }
2192
2193 /* The FW recovery sequence has multiple steps and the firmware load
2194 * requires Linux PM to be in the awake state. Thus hold the cnss wake
2195 * source until WLAN mission mode is enabled. The CNSS_TIMEOUT_RECOVERY
2196 * option should cover all time taken by this process.
2197 */
2198 pm_wakeup_ws_event(plat_priv->recovery_ws,
2199 cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY),
2200 true);
2201
2202 switch (reason) {
2203 case CNSS_REASON_LINK_DOWN:
2204 if (!cnss_bus_check_link_status(plat_priv)) {
2205 cnss_pr_dbg("Skip link down recovery as link is already up\n");
2206 return 0;
2207 }
2208 if (test_bit(LINK_DOWN_SELF_RECOVERY,
2209 &plat_priv->ctrl_params.quirks))
2210 goto self_recovery;
2211 if (!cnss_bus_recover_link_down(plat_priv)) {
2212 /* clear recovery bit here to avoid skipping
2213 * the recovery work for RDDM later
2214 */
2215 clear_bit(CNSS_DRIVER_RECOVERY,
2216 &plat_priv->driver_state);
2217 return 0;
2218 }
2219 break;
2220 case CNSS_REASON_RDDM:
2221 cnss_bus_collect_dump_info(plat_priv, false);
2222 break;
2223 case CNSS_REASON_DEFAULT:
2224 case CNSS_REASON_TIMEOUT:
2225 break;
2226 default:
2227 cnss_pr_err("Unsupported recovery reason: %s(%d)\n",
2228 cnss_recovery_reason_to_str(reason), reason);
2229 break;
2230 }
2231 cnss_bus_device_crashed(plat_priv);
2232
2233 return 0;
2234
2235 self_recovery:
2236 cnss_pr_dbg("Going for self recovery\n");
2237 cnss_bus_dev_shutdown(plat_priv);
2238
2239 if (test_bit(LINK_DOWN_SELF_RECOVERY, &plat_priv->ctrl_params.quirks))
2240 clear_bit(LINK_DOWN_SELF_RECOVERY,
2241 &plat_priv->ctrl_params.quirks);
2242
2243 /* If link down self recovery is triggered before Host driver
2244 * registration, avoid device power up because the device will
2245 * eventually be powered up as part of driver registration.
2246 */
2247
2248 if (!test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state) ||
2249 !test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
2250 cnss_pr_dbg("Host driver not registered yet, ignore Device Power Up, 0x%lx\n",
2251 plat_priv->driver_state);
2252 return 0;
2253 }
2254
2255 ret = cnss_bus_dev_powerup(plat_priv);
2256 if (ret)
2257 clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2258
2259 return 0;
2260 }
2261
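/* Handler for CNSS_DRIVER_EVENT_RECOVERY. Validates the current driver
 * state (no reboot, unload, idle shutdown or recovery already in
 * progress), then sets CNSS_DRIVER_RECOVERY and calls cnss_do_recovery()
 * with the posted reason. The event data is freed here in all cases.
 */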
2262 static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
2263 void *data)
2264 {
2265 struct cnss_recovery_data *recovery_data = data;
2266 int ret = 0;
2267
2268 cnss_pr_dbg("Driver recovery is triggered with reason: %s(%d)\n",
2269 cnss_recovery_reason_to_str(recovery_data->reason),
2270 recovery_data->reason);
2271
2272 if (!plat_priv->driver_state) {
2273 cnss_pr_err("Improper driver state, ignore recovery\n");
2274 ret = -EINVAL;
2275 goto out;
2276 }
2277
2278 if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
2279 cnss_pr_err("Reboot is in progress, ignore recovery\n");
2280 ret = -EINVAL;
2281 goto out;
2282 }
2283
2284 if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
2285 cnss_pr_err("Recovery is already in progress\n");
2286 CNSS_ASSERT(0);
2287 ret = -EINVAL;
2288 goto out;
2289 }
2290
2291 if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
2292 test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
2293 cnss_pr_err("Driver unload or idle shutdown is in progress, ignore recovery\n");
2294 ret = -EINVAL;
2295 goto out;
2296 }
2297
2298 switch (plat_priv->device_id) {
2299 case QCA6174_DEVICE_ID:
2300 if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
2301 test_bit(CNSS_DRIVER_IDLE_RESTART,
2302 &plat_priv->driver_state)) {
2303 cnss_pr_err("Driver load or idle restart is in progress, ignore recovery\n");
2304 ret = -EINVAL;
2305 goto out;
2306 }
2307 break;
2308 default:
2309 if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
2310 set_bit(CNSS_FW_BOOT_RECOVERY,
2311 &plat_priv->driver_state);
2312 }
2313 break;
2314 }
2315
2316 set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
2317 ret = cnss_do_recovery(plat_priv, recovery_data->reason);
2318
2319 out:
2320 kfree(data);
2321 return ret;
2322 }
2323
2324 int cnss_self_recovery(struct device *dev,
2325 enum cnss_recovery_reason reason)
2326 {
2327 cnss_schedule_recovery(dev, reason);
2328 return 0;
2329 }
2330 EXPORT_SYMBOL(cnss_self_recovery);
2331
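/* Schedule recovery from any context: marks the firmware as down unless
 * a device error was already notified, allocates the recovery data with
 * GFP_ATOMIC when called from atomic context, and posts a
 * CNSS_DRIVER_EVENT_RECOVERY event to the driver event work.
 */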
2332 void cnss_schedule_recovery(struct device *dev,
2333 enum cnss_recovery_reason reason)
2334 {
2335 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2336 struct cnss_recovery_data *data;
2337 gfp_t gfp = GFP_KERNEL;
2338
2339 if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
2340 cnss_bus_update_status(plat_priv, CNSS_FW_DOWN);
2341
2342 if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
2343 test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
2344 cnss_pr_dbg("Driver unload or idle shutdown is in progress, ignore schedule recovery\n");
2345 return;
2346 }
2347
2348 if (in_interrupt() || irqs_disabled())
2349 gfp = GFP_ATOMIC;
2350
2351 data = kzalloc(sizeof(*data), gfp);
2352 if (!data)
2353 return;
2354
2355 data->reason = reason;
2356 cnss_driver_event_post(plat_priv,
2357 CNSS_DRIVER_EVENT_RECOVERY,
2358 0, data);
2359 }
2360 EXPORT_SYMBOL(cnss_schedule_recovery);
2361
2362 int cnss_force_fw_assert(struct device *dev)
2363 {
2364 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2365
2366 if (!plat_priv) {
2367 cnss_pr_err("plat_priv is NULL\n");
2368 return -ENODEV;
2369 }
2370
2371 if (plat_priv->device_id == QCA6174_DEVICE_ID) {
2372 cnss_pr_info("Forced FW assert is not supported\n");
2373 return -EOPNOTSUPP;
2374 }
2375
2376 if (cnss_bus_is_device_down(plat_priv)) {
2377 cnss_pr_info("Device is already in bad state, ignore force assert\n");
2378 return 0;
2379 }
2380
2381 if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
2382 cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n");
2383 return 0;
2384 }
2385
2386 if (in_interrupt() || irqs_disabled())
2387 cnss_driver_event_post(plat_priv,
2388 CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
2389 0, NULL);
2390 else
2391 cnss_bus_force_fw_assert_hdlr(plat_priv);
2392
2393 return 0;
2394 }
2395 EXPORT_SYMBOL(cnss_force_fw_assert);
2396
2397 int cnss_force_collect_rddm(struct device *dev)
2398 {
2399 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2400 unsigned int timeout;
2401 int ret = 0;
2402
2403 if (!plat_priv) {
2404 cnss_pr_err("plat_priv is NULL\n");
2405 return -ENODEV;
2406 }
2407
2408 if (plat_priv->device_id == QCA6174_DEVICE_ID) {
2409 cnss_pr_info("Force collect rddm is not supported\n");
2410 return -EOPNOTSUPP;
2411 }
2412
2413 if (cnss_bus_is_device_down(plat_priv)) {
2414 cnss_pr_info("Device is already in bad state, wait to collect rddm\n");
2415 goto wait_rddm;
2416 }
2417
2418 if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
2419 cnss_pr_info("Recovery is already in progress, wait to collect rddm\n");
2420 goto wait_rddm;
2421 }
2422
2423 if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
2424 test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
2425 test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
2426 test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
2427 cnss_pr_info("Loading/Unloading/idle restart/shutdown is in progress, ignore forced collect rddm\n");
2428 return 0;
2429 }
2430
2431 ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
2432 if (ret)
2433 return ret;
2434
2435 wait_rddm:
2436 reinit_completion(&plat_priv->rddm_complete);
2437 timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RDDM);
2438 ret = wait_for_completion_timeout(&plat_priv->rddm_complete,
2439 msecs_to_jiffies(timeout));
2440 if (!ret) {
2441 cnss_pr_err("Timeout (%ums) waiting for RDDM to complete\n",
2442 timeout);
2443 ret = -ETIMEDOUT;
2444 } else if (ret > 0) {
2445 ret = 0;
2446 }
2447
2448 return ret;
2449 }
2450 EXPORT_SYMBOL(cnss_force_collect_rddm);
2451
2452 int cnss_qmi_send_get(struct device *dev)
2453 {
2454 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2455
2456 if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
2457 return 0;
2458
2459 return cnss_bus_qmi_send_get(plat_priv);
2460 }
2461 EXPORT_SYMBOL(cnss_qmi_send_get);
2462
2463 int cnss_qmi_send_put(struct device *dev)
2464 {
2465 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2466
2467 if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
2468 return 0;
2469
2470 return cnss_bus_qmi_send_put(plat_priv);
2471 }
2472 EXPORT_SYMBOL(cnss_qmi_send_put);
2473
2474 int cnss_qmi_send(struct device *dev, int type, void *cmd,
2475 int cmd_len, void *cb_ctx,
2476 int (*cb)(void *ctx, void *event, int event_len))
2477 {
2478 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2479 int ret;
2480
2481 if (!plat_priv)
2482 return -ENODEV;
2483
2484 if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
2485 return -EINVAL;
2486
2487 plat_priv->get_info_cb = cb;
2488 plat_priv->get_info_cb_ctx = cb_ctx;
2489
2490 ret = cnss_wlfw_get_info_send_sync(plat_priv, type, cmd, cmd_len);
2491 if (ret) {
2492 plat_priv->get_info_cb = NULL;
2493 plat_priv->get_info_cb_ctx = NULL;
2494 }
2495
2496 return ret;
2497 }
2498 EXPORT_SYMBOL(cnss_qmi_send);
2499
2500 int cnss_register_driver_async_data_cb(struct device *dev, void *cb_ctx,
2501 int (*cb)(void *ctx, uint16_t type,
2502 void *event, int event_len))
2503 {
2504 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
2505
2506 if (!plat_priv)
2507 return -ENODEV;
2508
2509 if (!test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state))
2510 return -EINVAL;
2511
2512 plat_priv->get_driver_async_data_cb = cb;
2513 plat_priv->get_driver_async_data_ctx = cb_ctx;
2514
2515 return 0;
2516 }
2517 EXPORT_SYMBOL(cnss_register_driver_async_data_cb);
2518
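/* Start cold boot calibration. The request is ignored if calibration is
 * already done or in progress, or if the WLAN hardware is disabled. The
 * handler waits for PCI probe completion, checks that the chip supports
 * cold boot calibration, then powers up the device with
 * CNSS_IN_COLD_BOOT_CAL set. On failure, calibration is marked done with
 * CNSS_CAL_FAILURE so it is not retried at boot.
 */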
2519 static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
2520 {
2521 int ret = 0;
2522 u32 retry = 0, timeout;
2523
2524 if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
2525 cnss_pr_dbg("Calibration complete. Ignore calibration req\n");
2526 goto out;
2527 } else if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
2528 cnss_pr_dbg("Calibration in progress. Ignore new calibration req\n");
2529 goto out;
2530 } else if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
2531 cnss_pr_dbg("Calibration deferred as WLAN device disabled\n");
2532 goto out;
2533 }
2534
2535 if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
2536 test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state) ||
2537 test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
2538 cnss_pr_err("WLAN in mission mode before cold boot calibration\n");
2539 CNSS_ASSERT(0);
2540 return -EINVAL;
2541 }
2542
2543 while (retry++ < CNSS_CAL_START_PROBE_WAIT_RETRY_MAX) {
2544 if (test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state))
2545 break;
2546 msleep(CNSS_CAL_START_PROBE_WAIT_MS);
2547
2548 if (retry == CNSS_CAL_START_PROBE_WAIT_RETRY_MAX) {
2549 cnss_pr_err("Calibration start failed as PCI probe not complete\n");
2550 CNSS_ASSERT(0);
2551 ret = -EINVAL;
2552 goto mark_cal_fail;
2553 }
2554 }
2555
2556 switch (plat_priv->device_id) {
2557 case QCA6290_DEVICE_ID:
2558 case QCA6390_DEVICE_ID:
2559 case QCA6490_DEVICE_ID:
2560 case KIWI_DEVICE_ID:
2561 case MANGO_DEVICE_ID:
2562 case PEACH_DEVICE_ID:
2563 break;
2564 default:
2565 cnss_pr_err("Not supported for device ID 0x%lx\n",
2566 plat_priv->device_id);
2567 ret = -EINVAL;
2568 goto mark_cal_fail;
2569 }
2570
2571 set_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
2572 if (test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state)) {
2573 timeout = cnss_get_timeout(plat_priv,
2574 CNSS_TIMEOUT_CALIBRATION);
2575 cnss_pr_dbg("Restarting calibration %ds timeout\n",
2576 timeout / 1000);
2577 if (cancel_delayed_work_sync(&plat_priv->wlan_reg_driver_work))
2578 schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
2579 msecs_to_jiffies(timeout));
2580 }
2581 reinit_completion(&plat_priv->cal_complete);
2582 ret = cnss_bus_dev_powerup(plat_priv);
2583 mark_cal_fail:
2584 if (ret) {
2585 complete(&plat_priv->cal_complete);
2586 clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
2587 /* Set CBC done in driver state to mark attempt and note error
2588 * since calibration cannot be retried at boot.
2589 */
2590 plat_priv->cal_done = CNSS_CAL_FAILURE;
2591 set_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state);
2592
2593 if (plat_priv->device_id == QCA6174_DEVICE_ID ||
2594 plat_priv->device_id == QCN7605_DEVICE_ID) {
2595 if (!test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state))
2596 goto out;
2597
2598 cnss_pr_info("Schedule WLAN driver load\n");
2599
2600 if (cancel_delayed_work_sync(&plat_priv->wlan_reg_driver_work))
2601 schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
2602 0);
2603 }
2604 }
2605
2606 out:
2607 return ret;
2608 }
2609
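/* Completion handler for cold boot calibration. Records the calibration
 * status, switches the firmware to CNSS_OFF, frees QDSS memory, releases
 * antenna sharing and shuts the device down (except for QCN7605). On a
 * successful calibration the result is uploaded to the cal DB file and
 * the deferred WLAN driver load work is scheduled.
 */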
2610 static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv,
2611 void *data)
2612 {
2613 struct cnss_cal_info *cal_info = data;
2614
2615 if (!test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
2616 test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
2617 goto out;
2618
2619 switch (cal_info->cal_status) {
2620 case CNSS_CAL_DONE:
2621 cnss_pr_dbg("Calibration completed successfully\n");
2622 plat_priv->cal_done = true;
2623 break;
2624 case CNSS_CAL_TIMEOUT:
2625 case CNSS_CAL_FAILURE:
2626 cnss_pr_dbg("Calibration failed. Status: %d, force shutdown\n",
2627 cal_info->cal_status);
2628 break;
2629 default:
2630 cnss_pr_err("Unknown calibration status: %u\n",
2631 cal_info->cal_status);
2632 break;
2633 }
2634
2635 cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
2636 cnss_bus_free_qdss_mem(plat_priv);
2637 cnss_release_antenna_sharing(plat_priv);
2638
2639 if (plat_priv->device_id == QCN7605_DEVICE_ID)
2640 goto skip_shutdown;
2641
2642 cnss_bus_dev_shutdown(plat_priv);
2643 msleep(POWER_RESET_MIN_DELAY_MS);
2644
2645 skip_shutdown:
2646 complete(&plat_priv->cal_complete);
2647 clear_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state);
2648 set_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state);
2649
2650 if (cal_info->cal_status == CNSS_CAL_DONE) {
2651 cnss_cal_mem_upload_to_file(plat_priv);
2652 if (!test_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state))
2653 goto out;
2654
2655 cnss_pr_dbg("Schedule WLAN driver load\n");
2656 if (cancel_delayed_work_sync(&plat_priv->wlan_reg_driver_work))
2657 schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
2658 0);
2659 }
2660 out:
2661 kfree(data);
2662 return 0;
2663 }
2664
2665 static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
2666 {
2667 int ret;
2668
2669 ret = cnss_bus_dev_powerup(plat_priv);
2670 if (ret)
2671 clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
2672
2673 return ret;
2674 }
2675
2676 static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
2677 {
2678 cnss_bus_dev_shutdown(plat_priv);
2679
2680 return 0;
2681 }
2682
2683 static int cnss_qdss_trace_req_mem_hdlr(struct cnss_plat_data *plat_priv)
2684 {
2685 int ret = 0;
2686
2687 ret = cnss_bus_alloc_qdss_mem(plat_priv);
2688 if (ret < 0)
2689 return ret;
2690
2691 return cnss_wlfw_qdss_trace_mem_info_send_sync(plat_priv);
2692 }
2693
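/* Translate a firmware physical address range to its kernel virtual
 * address by walking the FW memory segment table. The PA range must be
 * fully contained within one segment; the QMI_WLFW_MEM_LPASS_SHARED_V01
 * slot is skipped. Returns NULL when no matching segment is found.
 */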
2694 static void *cnss_get_fw_mem_pa_to_va(struct cnss_fw_mem *fw_mem,
2695 u32 mem_seg_len, u64 pa, u32 size)
2696 {
2697 int i = 0;
2698 u64 offset = 0;
2699 void *va = NULL;
2700 u64 local_pa;
2701 u32 local_size;
2702
2703 for (i = 0; i < mem_seg_len; i++) {
2704 if (i == QMI_WLFW_MEM_LPASS_SHARED_V01)
2705 continue;
2706
2707 local_pa = (u64)fw_mem[i].pa;
2708 local_size = (u32)fw_mem[i].size;
2709 if (pa == local_pa && size <= local_size) {
2710 va = fw_mem[i].va;
2711 break;
2712 }
2713 if (pa > local_pa &&
2714 pa < local_pa + local_size &&
2715 pa + size <= local_pa + local_size) {
2716 offset = pa - local_pa;
2717 va = fw_mem[i].va + offset;
2718 break;
2719 }
2720 }
2721 return va;
2722 }
2723
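/* Handle a firmware request to save memory contents to file. Each
 * requested PA range is translated to a virtual address within the DDR
 * or QDSS segment table and sent over the cnss generic netlink interface
 * as a CNSS_GENL_MSG_TYPE_QDSS message with the requested file name.
 */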
2724 static int cnss_fw_mem_file_save_hdlr(struct cnss_plat_data *plat_priv,
2725 void *data)
2726 {
2727 struct cnss_qmi_event_fw_mem_file_save_data *event_data = data;
2728 struct cnss_fw_mem *fw_mem_seg;
2729 int ret = 0;
2730 void *va = NULL;
2731 u32 i, fw_mem_seg_len;
2732
2733 switch (event_data->mem_type) {
2734 case QMI_WLFW_MEM_TYPE_DDR_V01:
2735 if (!plat_priv->fw_mem_seg_len)
2736 goto invalid_mem_save;
2737
2738 fw_mem_seg = plat_priv->fw_mem;
2739 fw_mem_seg_len = plat_priv->fw_mem_seg_len;
2740 break;
2741 case QMI_WLFW_MEM_QDSS_V01:
2742 if (!plat_priv->qdss_mem_seg_len)
2743 goto invalid_mem_save;
2744
2745 fw_mem_seg = plat_priv->qdss_mem;
2746 fw_mem_seg_len = plat_priv->qdss_mem_seg_len;
2747 break;
2748 default:
2749 goto invalid_mem_save;
2750 }
2751
2752 for (i = 0; i < event_data->mem_seg_len; i++) {
2753 va = cnss_get_fw_mem_pa_to_va(fw_mem_seg, fw_mem_seg_len,
2754 event_data->mem_seg[i].addr,
2755 event_data->mem_seg[i].size);
2756 if (!va) {
2757 cnss_pr_err("Fail to find matching va of pa %pa for mem type: %d\n",
2758 &event_data->mem_seg[i].addr,
2759 event_data->mem_type);
2760 ret = -EINVAL;
2761 break;
2762 }
2763 ret = cnss_genl_send_msg(va, CNSS_GENL_MSG_TYPE_QDSS,
2764 event_data->file_name,
2765 event_data->mem_seg[i].size);
2766 if (ret < 0) {
2767 cnss_pr_err("Fail to save fw mem data: %d\n",
2768 ret);
2769 break;
2770 }
2771 }
2772 kfree(data);
2773 return ret;
2774
2775 invalid_mem_save:
2776 cnss_pr_err("FW Mem type %d not allocated. Invalid save request\n",
2777 event_data->mem_type);
2778 kfree(data);
2779 return -EINVAL;
2780 }
2781
2782 static int cnss_qdss_trace_free_hdlr(struct cnss_plat_data *plat_priv)
2783 {
2784 cnss_bus_free_qdss_mem(plat_priv);
2785
2786 return 0;
2787 }
2788
2789 static int cnss_qdss_trace_req_data_hdlr(struct cnss_plat_data *plat_priv,
2790 void *data)
2791 {
2792 int ret = 0;
2793 struct cnss_qmi_event_fw_mem_file_save_data *event_data = data;
2794
2795 if (!plat_priv)
2796 return -ENODEV;
2797
2798 ret = cnss_wlfw_qdss_data_send_sync(plat_priv, event_data->file_name,
2799 event_data->total_size);
2800
2801 kfree(data);
2802 return ret;
2803 }
2804
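/* Driver event worker: drains plat_priv->event_list under event_lock,
 * dispatching each event to its handler with the lock dropped. A wake
 * source is held for the duration. Synchronous events are completed and
 * left for the poster to free; asynchronous events are freed here.
 */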
2805 static void cnss_driver_event_work(struct work_struct *work)
2806 {
2807 struct cnss_plat_data *plat_priv =
2808 container_of(work, struct cnss_plat_data, event_work);
2809 struct cnss_driver_event *event;
2810 unsigned long flags;
2811 int ret = 0;
2812
2813 if (!plat_priv) {
2814 cnss_pr_err("plat_priv is NULL!\n");
2815 return;
2816 }
2817
2818 cnss_pm_stay_awake(plat_priv);
2819
2820 spin_lock_irqsave(&plat_priv->event_lock, flags);
2821
2822 while (!list_empty(&plat_priv->event_list)) {
2823 event = list_first_entry(&plat_priv->event_list,
2824 struct cnss_driver_event, list);
2825 list_del(&event->list);
2826 spin_unlock_irqrestore(&plat_priv->event_lock, flags);
2827
2828 cnss_pr_dbg("Processing driver event: %s%s(%d), state: 0x%lx\n",
2829 cnss_driver_event_to_str(event->type),
2830 event->sync ? "-sync" : "", event->type,
2831 plat_priv->driver_state);
2832
2833 switch (event->type) {
2834 case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
2835 ret = cnss_wlfw_server_arrive(plat_priv, event->data);
2836 break;
2837 case CNSS_DRIVER_EVENT_SERVER_EXIT:
2838 ret = cnss_wlfw_server_exit(plat_priv);
2839 break;
2840 case CNSS_DRIVER_EVENT_REQUEST_MEM:
2841 ret = cnss_bus_alloc_fw_mem(plat_priv);
2842 if (ret)
2843 break;
2844 ret = cnss_wlfw_respond_mem_send_sync(plat_priv);
2845 break;
2846 case CNSS_DRIVER_EVENT_FW_MEM_READY:
2847 ret = cnss_fw_mem_ready_hdlr(plat_priv);
2848 break;
2849 case CNSS_DRIVER_EVENT_FW_READY:
2850 ret = cnss_fw_ready_hdlr(plat_priv);
2851 break;
2852 case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
2853 ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
2854 break;
2855 case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
2856 ret = cnss_cold_boot_cal_done_hdlr(plat_priv,
2857 event->data);
2858 break;
2859 case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
2860 ret = cnss_bus_register_driver_hdlr(plat_priv,
2861 event->data);
2862 break;
2863 case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
2864 ret = cnss_bus_unregister_driver_hdlr(plat_priv);
2865 break;
2866 case CNSS_DRIVER_EVENT_RECOVERY:
2867 ret = cnss_driver_recovery_hdlr(plat_priv,
2868 event->data);
2869 break;
2870 case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
2871 ret = cnss_bus_force_fw_assert_hdlr(plat_priv);
2872 break;
2873 case CNSS_DRIVER_EVENT_IDLE_RESTART:
2874 set_bit(CNSS_DRIVER_IDLE_RESTART,
2875 &plat_priv->driver_state);
2876 fallthrough;
2877 case CNSS_DRIVER_EVENT_POWER_UP:
2878 ret = cnss_power_up_hdlr(plat_priv);
2879 break;
2880 case CNSS_DRIVER_EVENT_IDLE_SHUTDOWN:
2881 set_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
2882 &plat_priv->driver_state);
2883 fallthrough;
2884 case CNSS_DRIVER_EVENT_POWER_DOWN:
2885 ret = cnss_power_down_hdlr(plat_priv);
2886 break;
2887 case CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
2888 ret = cnss_process_wfc_call_ind_event(plat_priv,
2889 event->data);
2890 break;
2891 case CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND:
2892 ret = cnss_process_twt_cfg_ind_event(plat_priv,
2893 event->data);
2894 break;
2895 case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
2896 ret = cnss_qdss_trace_req_mem_hdlr(plat_priv);
2897 break;
2898 case CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE:
2899 ret = cnss_fw_mem_file_save_hdlr(plat_priv,
2900 event->data);
2901 break;
2902 case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
2903 ret = cnss_qdss_trace_free_hdlr(plat_priv);
2904 break;
2905 case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA:
2906 ret = cnss_qdss_trace_req_data_hdlr(plat_priv,
2907 event->data);
2908 break;
2909 default:
2910 cnss_pr_err("Invalid driver event type: %d",
2911 event->type);
2912 kfree(event);
2913 spin_lock_irqsave(&plat_priv->event_lock, flags);
2914 continue;
2915 }
2916
2917 spin_lock_irqsave(&plat_priv->event_lock, flags);
2918 if (event->sync) {
2919 event->ret = ret;
2920 complete(&event->complete);
2921 continue;
2922 }
2923 spin_unlock_irqrestore(&plat_priv->event_lock, flags);
2924
2925 kfree(event);
2926
2927 spin_lock_irqsave(&plat_priv->event_lock, flags);
2928 }
2929 spin_unlock_irqrestore(&plat_priv->event_lock, flags);
2930
2931 cnss_pm_relax(plat_priv);
2932 }
2933
2934 #if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART)
2935 int cnss_register_subsys(struct cnss_plat_data *plat_priv)
2936 {
2937 int ret = 0;
2938 struct cnss_subsys_info *subsys_info;
2939
2940 subsys_info = &plat_priv->subsys_info;
2941
2942 subsys_info->subsys_desc.name = plat_priv->device_name;
2943 subsys_info->subsys_desc.owner = THIS_MODULE;
2944 subsys_info->subsys_desc.powerup = cnss_subsys_powerup;
2945 subsys_info->subsys_desc.shutdown = cnss_subsys_shutdown;
2946 subsys_info->subsys_desc.ramdump = cnss_subsys_ramdump;
2947 subsys_info->subsys_desc.crash_shutdown = cnss_subsys_crash_shutdown;
2948 subsys_info->subsys_desc.dev = &plat_priv->plat_dev->dev;
2949
2950 subsys_info->subsys_device = subsys_register(&subsys_info->subsys_desc);
2951 if (IS_ERR(subsys_info->subsys_device)) {
2952 ret = PTR_ERR(subsys_info->subsys_device);
2953 cnss_pr_err("Failed to register subsys, err = %d\n", ret);
2954 goto out;
2955 }
2956
2957 subsys_info->subsys_handle =
2958 subsystem_get(subsys_info->subsys_desc.name);
2959 if (!subsys_info->subsys_handle) {
2960 cnss_pr_err("Failed to get subsys_handle!\n");
2961 ret = -EINVAL;
2962 goto unregister_subsys;
2963 } else if (IS_ERR(subsys_info->subsys_handle)) {
2964 ret = PTR_ERR(subsys_info->subsys_handle);
2965 cnss_pr_err("Failed to do subsystem_get, err = %d\n", ret);
2966 goto unregister_subsys;
2967 }
2968
2969 return 0;
2970
2971 unregister_subsys:
2972 subsys_unregister(subsys_info->subsys_device);
2973 out:
2974 return ret;
2975 }
2976
2977 void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
2978 {
2979 struct cnss_subsys_info *subsys_info;
2980
2981 subsys_info = &plat_priv->subsys_info;
2982 subsystem_put(subsys_info->subsys_handle);
2983 subsys_unregister(subsys_info->subsys_device);
2984 }
2985
2986 static void *cnss_create_ramdump_device(struct cnss_plat_data *plat_priv)
2987 {
2988 struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
2989
2990 return create_ramdump_device(subsys_info->subsys_desc.name,
2991 subsys_info->subsys_desc.dev);
2992 }
2993
2994 static void cnss_destroy_ramdump_device(struct cnss_plat_data *plat_priv,
2995 void *ramdump_dev)
2996 {
2997 destroy_ramdump_device(ramdump_dev);
2998 }
2999
3000 int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
3001 {
3002 struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
3003 struct ramdump_segment segment;
3004
3005 memset(&segment, 0, sizeof(segment));
3006 segment.v_address = (void __iomem *)ramdump_info->ramdump_va;
3007 segment.size = ramdump_info->ramdump_size;
3008
3009 return qcom_ramdump(ramdump_info->ramdump_dev, &segment, 1);
3010 }
3011
3012 int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
3013 {
3014 struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3015 struct cnss_dump_data *dump_data = &info_v2->dump_data;
3016 struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
3017 struct ramdump_segment *ramdump_segs, *s;
3018 struct cnss_dump_meta_info meta_info = {0};
3019 int i, ret = 0;
3020
3021 ramdump_segs = kcalloc(dump_data->nentries + 1,
3022 sizeof(*ramdump_segs),
3023 GFP_KERNEL);
3024 if (!ramdump_segs)
3025 return -ENOMEM;
3026
3027 s = ramdump_segs + 1;
3028 for (i = 0; i < dump_data->nentries; i++) {
3029 if (dump_seg->type >= CNSS_FW_DUMP_TYPE_MAX) {
3030 cnss_pr_err("Unsupported dump type: %d",
3031 dump_seg->type);
3032 continue;
3033 }
3034
3035 if (meta_info.entry[dump_seg->type].entry_start == 0) {
3036 meta_info.entry[dump_seg->type].type = dump_seg->type;
3037 meta_info.entry[dump_seg->type].entry_start = i + 1;
3038 }
3039 meta_info.entry[dump_seg->type].entry_num++;
3040
3041 s->address = dump_seg->address;
3042 s->v_address = (void __iomem *)dump_seg->v_address;
3043 s->size = dump_seg->size;
3044 s++;
3045 dump_seg++;
3046 }
3047
3048 meta_info.magic = CNSS_RAMDUMP_MAGIC;
3049 meta_info.version = CNSS_RAMDUMP_VERSION;
3050 meta_info.chipset = plat_priv->device_id;
3051 meta_info.total_entries = CNSS_FW_DUMP_TYPE_MAX;
3052
3053 ramdump_segs->v_address = (void __iomem *)(&meta_info);
3054 ramdump_segs->size = sizeof(meta_info);
3055
3056 ret = qcom_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
3057 dump_data->nentries + 1);
3058 kfree(ramdump_segs);
3059
3060 return ret;
3061 }
3062 #else
3063 static int cnss_panic_handler(struct notifier_block *nb, unsigned long action,
3064 void *data)
3065 {
3066 struct cnss_plat_data *plat_priv =
3067 container_of(nb, struct cnss_plat_data, panic_nb);
3068
3069 cnss_bus_dev_crash_shutdown(plat_priv);
3070
3071 return NOTIFY_DONE;
3072 }
3073
3074 int cnss_register_subsys(struct cnss_plat_data *plat_priv)
3075 {
3076 int ret;
3077
3078 if (!plat_priv)
3079 return -ENODEV;
3080
3081 plat_priv->panic_nb.notifier_call = cnss_panic_handler;
3082 ret = atomic_notifier_chain_register(&panic_notifier_list,
3083 &plat_priv->panic_nb);
3084 if (ret) {
3085 cnss_pr_err("Failed to register panic handler\n");
3086 return -EINVAL;
3087 }
3088
3089 return 0;
3090 }
3091
3092 void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
3093 {
3094 int ret;
3095
3096 ret = atomic_notifier_chain_unregister(&panic_notifier_list,
3097 &plat_priv->panic_nb);
3098 if (ret)
3099 cnss_pr_err("Failed to unregister panic handler\n");
3100 }
3101
3102 #if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
3103 static void *cnss_create_ramdump_device(struct cnss_plat_data *plat_priv)
3104 {
3105 return &plat_priv->plat_dev->dev;
3106 }
3107
3108 static void cnss_destroy_ramdump_device(struct cnss_plat_data *plat_priv,
3109 void *ramdump_dev)
3110 {
3111 }
3112 #endif
3113
3114 #if IS_ENABLED(CONFIG_QCOM_RAMDUMP)
3115 int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
3116 {
3117 struct cnss_ramdump_info *ramdump_info = &plat_priv->ramdump_info;
3118 struct qcom_dump_segment segment;
3119 struct list_head head;
3120
3121 if (!dump_enabled()) {
3122 cnss_pr_info("Dump collection is not enabled\n");
3123 return 0;
3124 }
3125 INIT_LIST_HEAD(&head);
3126 memset(&segment, 0, sizeof(segment));
3127 segment.va = ramdump_info->ramdump_va;
3128 segment.size = ramdump_info->ramdump_size;
3129 list_add(&segment.node, &head);
3130
3131 return qcom_dump(&head, ramdump_info->ramdump_dev);
3132 }
3133 #else
3134 int cnss_do_ramdump(struct cnss_plat_data *plat_priv)
3135 {
3136 return 0;
3137 }
3138
3139 /* Using a completion event inside the dynamically allocated ramdump_desc
3140 * may result in a race between freeing the event (after setting it to
3141 * complete inside the dev coredump free callback) and the thread that is
3142 * waiting for the completion.
3143 */
3144 DECLARE_COMPLETION(dump_done);
3145 #define TIMEOUT_SAVE_DUMP_MS 30000
3146
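/* The helpers below select between the elf32 and elf64 structure layouts
 * at run time based on the requested ELF class, both when sizing headers
 * and when setting ELF header and program header fields.
 */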
3147 #define SIZEOF_ELF_STRUCT(__xhdr) \
3148 static inline size_t sizeof_elf_##__xhdr(unsigned char class) \
3149 { \
3150 if (class == ELFCLASS32) \
3151 return sizeof(struct elf32_##__xhdr); \
3152 else \
3153 return sizeof(struct elf64_##__xhdr); \
3154 }
3155
3156 SIZEOF_ELF_STRUCT(phdr)
3157 SIZEOF_ELF_STRUCT(hdr)
3158
3159 #define set_xhdr_property(__xhdr, arg, class, member, value) \
3160 do { \
3161 if (class == ELFCLASS32) \
3162 ((struct elf32_##__xhdr *)arg)->member = value; \
3163 else \
3164 ((struct elf64_##__xhdr *)arg)->member = value; \
3165 } while (0)
3166
3167 #define set_ehdr_property(arg, class, member, value) \
3168 set_xhdr_property(hdr, arg, class, member, value)
3169 #define set_phdr_property(arg, class, member, value) \
3170 set_xhdr_property(phdr, arg, class, member, value)
3171
3172 /* These replace the qcom_ramdump driver APIs called from the common API
3173 * cnss_do_elf_ramdump() with the ones defined here.
3174 */
3175 #define qcom_dump_segment cnss_qcom_dump_segment
3176 #define qcom_elf_dump cnss_qcom_elf_dump
3177 #define dump_enabled cnss_dump_enabled
3178
3179 struct cnss_qcom_dump_segment {
3180 struct list_head node;
3181 dma_addr_t da;
3182 void *va;
3183 size_t size;
3184 };
3185
3186 struct cnss_qcom_ramdump_desc {
3187 void *data;
3188 struct completion dump_done;
3189 };
3190
3191 static ssize_t cnss_qcom_devcd_readv(char *buffer, loff_t offset, size_t count,
3192 void *data, size_t datalen)
3193 {
3194 struct cnss_qcom_ramdump_desc *desc = data;
3195
3196 return memory_read_from_buffer(buffer, count, &offset, desc->data,
3197 datalen);
3198 }
3199
3200 static void cnss_qcom_devcd_freev(void *data)
3201 {
3202 struct cnss_qcom_ramdump_desc *desc = data;
3203
3204 cnss_pr_dbg("Free dump data for dev coredump\n");
3205
3206 complete(&dump_done);
3207 vfree(desc->data);
3208 kfree(desc);
3209 }
3210
3211 static int cnss_qcom_devcd_dump(struct device *dev, void *data, size_t datalen,
3212 gfp_t gfp)
3213 {
3214 struct cnss_qcom_ramdump_desc *desc;
3215 unsigned int timeout = TIMEOUT_SAVE_DUMP_MS;
3216 int ret;
3217
3218 desc = kmalloc(sizeof(*desc), GFP_KERNEL);
3219 if (!desc)
3220 return -ENOMEM;
3221
3222 desc->data = data;
3223 reinit_completion(&dump_done);
3224
3225 dev_coredumpm(dev, NULL, desc, datalen, gfp,
3226 cnss_qcom_devcd_readv, cnss_qcom_devcd_freev);
3227
3228 ret = wait_for_completion_timeout(&dump_done,
3229 msecs_to_jiffies(timeout));
3230 if (!ret)
3231 cnss_pr_err("Timeout waiting (%dms) for saving dump to file system\n",
3232 timeout);
3233
3234 return ret ? 0 : -ETIMEDOUT;
3235 }
3236
3237 /* Since the elf32 and elf64 identification is identical apart from
3238 * the class, use elf32 by default.
3239 */
3240 static void init_elf_identification(struct elf32_hdr *ehdr, unsigned char class)
3241 {
3242 memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
3243 ehdr->e_ident[EI_CLASS] = class;
3244 ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
3245 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
3246 ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
3247 }
3248
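/* Build an in-memory ELF core image from the given segment list: one
 * PT_LOAD program header per segment, with segment data copied from its
 * virtual address or ioremapped from its device address, then hand the
 * image to the dev coredump framework via cnss_qcom_devcd_dump().
 */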
3249 int cnss_qcom_elf_dump(struct list_head *segs, struct device *dev,
3250 unsigned char class)
3251 {
3252 struct cnss_qcom_dump_segment *segment;
3253 void *phdr, *ehdr;
3254 size_t data_size, offset;
3255 int phnum = 0;
3256 void *data;
3257 void __iomem *ptr;
3258
3259 if (!segs || list_empty(segs))
3260 return -EINVAL;
3261
3262 data_size = sizeof_elf_hdr(class);
3263 list_for_each_entry(segment, segs, node) {
3264 data_size += sizeof_elf_phdr(class) + segment->size;
3265 phnum++;
3266 }
3267
3268 data = vmalloc(data_size);
3269 if (!data)
3270 return -ENOMEM;
3271
3272 cnss_pr_dbg("Creating ELF file with size %zu\n", data_size);
3273
3274 ehdr = data;
3275 memset(ehdr, 0, sizeof_elf_hdr(class));
3276 init_elf_identification(ehdr, class);
3277 set_ehdr_property(ehdr, class, e_type, ET_CORE);
3278 set_ehdr_property(ehdr, class, e_machine, EM_NONE);
3279 set_ehdr_property(ehdr, class, e_version, EV_CURRENT);
3280 set_ehdr_property(ehdr, class, e_phoff, sizeof_elf_hdr(class));
3281 set_ehdr_property(ehdr, class, e_ehsize, sizeof_elf_hdr(class));
3282 set_ehdr_property(ehdr, class, e_phentsize, sizeof_elf_phdr(class));
3283 set_ehdr_property(ehdr, class, e_phnum, phnum);
3284
3285 phdr = data + sizeof_elf_hdr(class);
3286 offset = sizeof_elf_hdr(class) + sizeof_elf_phdr(class) * phnum;
3287 list_for_each_entry(segment, segs, node) {
3288 memset(phdr, 0, sizeof_elf_phdr(class));
3289 set_phdr_property(phdr, class, p_type, PT_LOAD);
3290 set_phdr_property(phdr, class, p_offset, offset);
3291 set_phdr_property(phdr, class, p_vaddr, segment->da);
3292 set_phdr_property(phdr, class, p_paddr, segment->da);
3293 set_phdr_property(phdr, class, p_filesz, segment->size);
3294 set_phdr_property(phdr, class, p_memsz, segment->size);
3295 set_phdr_property(phdr, class, p_flags, PF_R | PF_W | PF_X);
3296 set_phdr_property(phdr, class, p_align, 0);
3297
3298 if (segment->va) {
3299 memcpy(data + offset, segment->va, segment->size);
3300 } else {
3301 ptr = devm_ioremap(dev, segment->da, segment->size);
3302 if (!ptr) {
3303 cnss_pr_err("Invalid coredump segment (%pad, %zu)\n",
3304 &segment->da, segment->size);
3305 memset(data + offset, 0xff, segment->size);
3306 } else {
3307 memcpy_fromio(data + offset, ptr,
3308 segment->size);
3309 }
3310 }
3311
3312 offset += segment->size;
3313 phdr += sizeof_elf_phdr(class);
3314 }
3315
3316 return cnss_qcom_devcd_dump(dev, data, data_size, GFP_KERNEL);
3317 }
3318
3319 /* Saving dump to file system is always needed in this case. */
3320 static bool cnss_dump_enabled(void)
3321 {
3322 return true;
3323 }
3324 #endif /* CONFIG_QCOM_RAMDUMP */
3325
3326 int cnss_do_elf_ramdump(struct cnss_plat_data *plat_priv)
3327 {
3328 struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3329 struct cnss_dump_data *dump_data = &info_v2->dump_data;
3330 struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
3331 struct qcom_dump_segment *seg;
3332 struct cnss_dump_meta_info meta_info = {0};
3333 struct list_head head;
3334 int i, ret = 0;
3335
3336 if (!dump_enabled()) {
3337 cnss_pr_info("Dump collection is not enabled\n");
3338 return ret;
3339 }
3340
3341 INIT_LIST_HEAD(&head);
3342 for (i = 0; i < dump_data->nentries; i++) {
3343 if (dump_seg->type >= CNSS_FW_DUMP_TYPE_MAX) {
3344 cnss_pr_err("Unsupported dump type: %d",
3345 dump_seg->type);
3346 continue;
3347 }
3348
3349 seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
3350 if (!seg) {
3351 cnss_pr_err("%s: Failed to allocate mem for seg %d\n",
3352 __func__, i);
3353 continue;
3354 }
3355
3356 if (meta_info.entry[dump_seg->type].entry_start == 0) {
3357 meta_info.entry[dump_seg->type].type = dump_seg->type;
3358 meta_info.entry[dump_seg->type].entry_start = i + 1;
3359 }
3360 meta_info.entry[dump_seg->type].entry_num++;
3361 seg->da = dump_seg->address;
3362 seg->va = dump_seg->v_address;
3363 seg->size = dump_seg->size;
3364 list_add_tail(&seg->node, &head);
3365 dump_seg++;
3366 }
3367
3368 seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
3369 if (!seg) {
3370 cnss_pr_err("%s: Failed to allocate mem for elf ramdump seg\n",
3371 __func__);
3372 goto skip_elf_dump;
3373 }
3374
3375 meta_info.magic = CNSS_RAMDUMP_MAGIC;
3376 meta_info.version = CNSS_RAMDUMP_VERSION;
3377 meta_info.chipset = plat_priv->device_id;
3378 meta_info.total_entries = CNSS_FW_DUMP_TYPE_MAX;
3379 seg->va = &meta_info;
3380 seg->size = sizeof(meta_info);
3381 list_add(&seg->node, &head);
3382
3383 ret = qcom_elf_dump(&head, info_v2->ramdump_dev, ELF_CLASS);
3384
3385 skip_elf_dump:
3386 while (!list_empty(&head)) {
3387 seg = list_first_entry(&head, struct qcom_dump_segment, node);
3388 list_del(&seg->node);
3389 kfree(seg);
3390 }
3391
3392 return ret;
3393 }
3394
3395 #ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP
3396 /**
3397 * cnss_host_ramdump_dev_release() - callback function for device release
3398 * @dev: device to be released
3399 *
3400 * Return: None
3401 */
3402 static void cnss_host_ramdump_dev_release(struct device *dev)
3403 {
3404 cnss_pr_dbg("free host ramdump device\n");
3405 kfree(dev);
3406 }
3407
3408 int cnss_do_host_ramdump(struct cnss_plat_data *plat_priv,
3409 struct cnss_ssr_driver_dump_entry *ssr_entry,
3410 size_t num_entries_loaded)
3411 {
3412 struct qcom_dump_segment *seg;
3413 struct cnss_host_dump_meta_info meta_info = {0};
3414 struct list_head head;
3415 int dev_ret = 0;
3416 struct device *new_device;
3417 static const char * const wlan_str[] = {
3418 [CNSS_HOST_WLAN_LOGS] = "wlan_logs",
3419 [CNSS_HOST_HTC_CREDIT] = "htc_credit",
3420 [CNSS_HOST_WMI_TX_CMP] = "wmi_tx_cmp",
3421 [CNSS_HOST_WMI_COMMAND_LOG] = "wmi_command_log",
3422 [CNSS_HOST_WMI_EVENT_LOG] = "wmi_event_log",
3423 [CNSS_HOST_WMI_RX_EVENT] = "wmi_rx_event",
3424 [CNSS_HOST_HAL_SOC] = "hal_soc",
3425 [CNSS_HOST_GWLAN_LOGGING] = "gwlan_logging",
3426 [CNSS_HOST_WMI_DEBUG_LOG_INFO] = "wmi_debug_log_info",
3427 [CNSS_HOST_HTC_CREDIT_IDX] = "htc_credit_history_idx",
3428 [CNSS_HOST_HTC_CREDIT_LEN] = "htc_credit_history_length",
3429 [CNSS_HOST_WMI_TX_CMP_IDX] = "wmi_tx_cmp_idx",
3430 [CNSS_HOST_WMI_COMMAND_LOG_IDX] = "wmi_command_log_idx",
3431 [CNSS_HOST_WMI_EVENT_LOG_IDX] = "wmi_event_log_idx",
3432 [CNSS_HOST_WMI_RX_EVENT_IDX] = "wmi_rx_event_idx",
3433 [CNSS_HOST_HIF_CE_DESC_HISTORY_BUFF] = "hif_ce_desc_history_buff",
3434 [CNSS_HOST_HANG_EVENT_DATA] = "hang_event_data",
3435 [CNSS_HOST_CE_DESC_HIST] = "hif_ce_desc_hist",
3436 [CNSS_HOST_CE_COUNT_MAX] = "hif_ce_count_max",
3437 [CNSS_HOST_CE_HISTORY_MAX] = "hif_ce_history_max",
3438 [CNSS_HOST_ONLY_FOR_CRIT_CE] = "hif_ce_only_for_crit",
3439 [CNSS_HOST_HIF_EVENT_HISTORY] = "hif_event_history",
3440 [CNSS_HOST_HIF_EVENT_HIST_MAX] = "hif_event_hist_max",
3441 [CNSS_HOST_DP_WBM_DESC_REL] = "wbm_desc_rel_ring",
3442 [CNSS_HOST_DP_WBM_DESC_REL_HANDLE] = "wbm_desc_rel_ring_handle",
3443 [CNSS_HOST_DP_TCL_CMD] = "tcl_cmd_ring",
3444 [CNSS_HOST_DP_TCL_CMD_HANDLE] = "tcl_cmd_ring_handle",
3445 [CNSS_HOST_DP_TCL_STATUS] = "tcl_status_ring",
3446 [CNSS_HOST_DP_TCL_STATUS_HANDLE] = "tcl_status_ring_handle",
3447 [CNSS_HOST_DP_REO_REINJ] = "reo_reinject_ring",
3448 [CNSS_HOST_DP_REO_REINJ_HANDLE] = "reo_reinject_ring_handle",
3449 [CNSS_HOST_DP_RX_REL] = "rx_rel_ring",
3450 [CNSS_HOST_DP_RX_REL_HANDLE] = "rx_rel_ring_handle",
3451 [CNSS_HOST_DP_REO_EXP] = "reo_exception_ring",
3452 [CNSS_HOST_DP_REO_EXP_HANDLE] = "reo_exception_ring_handle",
3453 [CNSS_HOST_DP_REO_CMD] = "reo_cmd_ring",
3454 [CNSS_HOST_DP_REO_CMD_HANDLE] = "reo_cmd_ring_handle",
3455 [CNSS_HOST_DP_REO_STATUS] = "reo_status_ring",
3456 [CNSS_HOST_DP_REO_STATUS_HANDLE] = "reo_status_ring_handle",
3457 [CNSS_HOST_DP_TCL_DATA_0] = "tcl_data_ring_0",
3458 [CNSS_HOST_DP_TCL_DATA_0_HANDLE] = "tcl_data_ring_0_handle",
3459 [CNSS_HOST_DP_TX_COMP_0] = "tx_comp_ring_0",
3460 [CNSS_HOST_DP_TX_COMP_0_HANDLE] = "tx_comp_ring_0_handle",
3461 [CNSS_HOST_DP_TCL_DATA_1] = "tcl_data_ring_1",
3462 [CNSS_HOST_DP_TCL_DATA_1_HANDLE] = "tcl_data_ring_1_handle",
3463 [CNSS_HOST_DP_TX_COMP_1] = "tx_comp_ring_1",
3464 [CNSS_HOST_DP_TX_COMP_1_HANDLE] = "tx_comp_ring_1_handle",
3465 [CNSS_HOST_DP_TCL_DATA_2] = "tcl_data_ring_2",
3466 [CNSS_HOST_DP_TCL_DATA_2_HANDLE] = "tcl_data_ring_2_handle",
3467 [CNSS_HOST_DP_TX_COMP_2] = "tx_comp_ring_2",
3468 [CNSS_HOST_DP_TX_COMP_2_HANDLE] = "tx_comp_ring_2_handle",
3469 [CNSS_HOST_DP_REO_DST_0] = "reo_dest_ring_0",
3470 [CNSS_HOST_DP_REO_DST_0_HANDLE] = "reo_dest_ring_0_handle",
3471 [CNSS_HOST_DP_REO_DST_1] = "reo_dest_ring_1",
3472 [CNSS_HOST_DP_REO_DST_1_HANDLE] = "reo_dest_ring_1_handle",
3473 [CNSS_HOST_DP_REO_DST_2] = "reo_dest_ring_2",
3474 [CNSS_HOST_DP_REO_DST_2_HANDLE] = "reo_dest_ring_2_handle",
3475 [CNSS_HOST_DP_REO_DST_3] = "reo_dest_ring_3",
3476 [CNSS_HOST_DP_REO_DST_3_HANDLE] = "reo_dest_ring_3_handle",
3477 [CNSS_HOST_DP_REO_DST_4] = "reo_dest_ring_4",
3478 [CNSS_HOST_DP_REO_DST_4_HANDLE] = "reo_dest_ring_4_handle",
3479 [CNSS_HOST_DP_REO_DST_5] = "reo_dest_ring_5",
3480 [CNSS_HOST_DP_REO_DST_5_HANDLE] = "reo_dest_ring_5_handle",
3481 [CNSS_HOST_DP_REO_DST_6] = "reo_dest_ring_6",
3482 [CNSS_HOST_DP_REO_DST_6_HANDLE] = "reo_dest_ring_6_handle",
3483 [CNSS_HOST_DP_REO_DST_7] = "reo_dest_ring_7",
3484 [CNSS_HOST_DP_REO_DST_7_HANDLE] = "reo_dest_ring_7_handle",
3485 [CNSS_HOST_DP_PDEV_0] = "dp_pdev_0",
3486 [CNSS_HOST_DP_WLAN_CFG_CTX] = "wlan_cfg_ctx",
3487 [CNSS_HOST_DP_SOC] = "dp_soc",
3488 [CNSS_HOST_HAL_RX_FST] = "hal_rx_fst",
3489 [CNSS_HOST_DP_FISA] = "dp_fisa",
3490 [CNSS_HOST_DP_FISA_HW_FSE_TABLE] = "dp_fisa_hw_fse_table",
3491 [CNSS_HOST_DP_FISA_SW_FSE_TABLE] = "dp_fisa_sw_fse_table",
3492 [CNSS_HOST_HIF] = "hif",
3493 [CNSS_HOST_QDF_NBUF_HIST] = "qdf_nbuf_history",
3494 [CNSS_HOST_TCL_WBM_MAP] = "tcl_wbm_map_array",
3495 [CNSS_HOST_RX_MAC_BUF_RING_0] = "rx_mac_buf_ring_0",
3496 [CNSS_HOST_RX_MAC_BUF_RING_0_HANDLE] = "rx_mac_buf_ring_0_handle",
3497 [CNSS_HOST_RX_MAC_BUF_RING_1] = "rx_mac_buf_ring_1",
3498 [CNSS_HOST_RX_MAC_BUF_RING_1_HANDLE] = "rx_mac_buf_ring_1_handle",
3499 [CNSS_HOST_RX_REFILL_0] = "rx_refill_buf_ring_0",
3500 [CNSS_HOST_RX_REFILL_0_HANDLE] = "rx_refill_buf_ring_0_handle",
3501 [CNSS_HOST_CE_0] = "ce_0",
3502 [CNSS_HOST_CE_0_SRC_RING] = "ce_0_src_ring",
3503 [CNSS_HOST_CE_0_SRC_RING_CTX] = "ce_0_src_ring_ctx",
3504 [CNSS_HOST_CE_1] = "ce_1",
3505 [CNSS_HOST_CE_1_STATUS_RING] = "ce_1_status_ring",
3506 [CNSS_HOST_CE_1_STATUS_RING_CTX] = "ce_1_status_ring_ctx",
3507 [CNSS_HOST_CE_1_DEST_RING] = "ce_1_dest_ring",
3508 [CNSS_HOST_CE_1_DEST_RING_CTX] = "ce_1_dest_ring_ctx",
3509 [CNSS_HOST_CE_2] = "ce_2",
3510 [CNSS_HOST_CE_2_STATUS_RING] = "ce_2_status_ring",
3511 [CNSS_HOST_CE_2_STATUS_RING_CTX] = "ce_2_status_ring_ctx",
3512 [CNSS_HOST_CE_2_DEST_RING] = "ce_2_dest_ring",
3513 [CNSS_HOST_CE_2_DEST_RING_CTX] = "ce_2_dest_ring_ctx",
3514 [CNSS_HOST_CE_3] = "ce_3",
3515 [CNSS_HOST_CE_3_SRC_RING] = "ce_3_src_ring",
3516 [CNSS_HOST_CE_3_SRC_RING_CTX] = "ce_3_src_ring_ctx",
3517 [CNSS_HOST_CE_4] = "ce_4",
3518 [CNSS_HOST_CE_4_SRC_RING] = "ce_4_src_ring",
3519 [CNSS_HOST_CE_4_SRC_RING_CTX] = "ce_4_src_ring_ctx",
3520 [CNSS_HOST_CE_5] = "ce_5",
3521 [CNSS_HOST_CE_6] = "ce_6",
3522 [CNSS_HOST_CE_7] = "ce_7",
3523 [CNSS_HOST_CE_7_STATUS_RING] = "ce_7_status_ring",
3524 [CNSS_HOST_CE_7_STATUS_RING_CTX] = "ce_7_status_ring_ctx",
3525 [CNSS_HOST_CE_7_DEST_RING] = "ce_7_dest_ring",
3526 [CNSS_HOST_CE_7_DEST_RING_CTX] = "ce_7_dest_ring_ctx",
3527 [CNSS_HOST_CE_8] = "ce_8",
3528 [CNSS_HOST_DP_TCL_DATA_3] = "tcl_data_ring_3",
3529 [CNSS_HOST_DP_TCL_DATA_3_HANDLE] = "tcl_data_ring_3_handle",
3530 [CNSS_HOST_DP_TX_COMP_3] = "tx_comp_ring_3",
3531 [CNSS_HOST_DP_TX_COMP_3_HANDLE] = "tx_comp_ring_3_handle"
3532 };
3533 int i;
3534 int ret = 0;
3535 enum cnss_host_dump_type j;
3536
3537 if (!dump_enabled()) {
3538 cnss_pr_info("Dump collection is not enabled\n");
3539 return ret;
3540 }
3541
3542 new_device = kcalloc(1, sizeof(*new_device), GFP_KERNEL);
3543 if (!new_device) {
3544 cnss_pr_err("Failed to alloc device mem\n");
3545 return -ENOMEM;
3546 }
3547
3548 new_device->release = cnss_host_ramdump_dev_release;
3549 device_initialize(new_device);
3550 dev_set_name(new_device, "wlan_driver");
3551 dev_ret = device_add(new_device);
3552 if (dev_ret) {
3553 cnss_pr_err("Failed to add new device\n");
3554 goto put_device;
3555 }
3556
3557 INIT_LIST_HEAD(&head);
3558 for (i = 0; i < num_entries_loaded; i++) {
3559 /* If the region name registered by the driver is not present in
3560 * wlan_str, the type for that entry is never set even though the
3561 * entry is still added, leaving the type as 0. Entry type 0 is
3562 * currently used for wlan_logs, so a stale 0 would break parsing,
3563 * which is done based on the type field. Initialize the type to
3564 * -1 (invalid) to avoid such issues.
3565 */
3566 meta_info.entry[i].type = -1;
3567 seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
3568 if (!seg) {
3569 cnss_pr_err("Failed to alloc seg entry %d\n", i);
3570 continue;
3571 }
3572
3573 seg->va = ssr_entry[i].buffer_pointer;
3574 seg->da = (dma_addr_t)ssr_entry[i].buffer_pointer;
3575 seg->size = ssr_entry[i].buffer_size;
3576
3577 for (j = 0; j < CNSS_HOST_DUMP_TYPE_MAX; j++) {
3578 if (strcmp(ssr_entry[i].region_name, wlan_str[j]) == 0) {
3579 meta_info.entry[i].type = j;
3580 }
3581 }
3582 meta_info.entry[i].entry_start = i + 1;
3583 meta_info.entry[i].entry_num++;
3584
3585 list_add_tail(&seg->node, &head);
3586 }
3587
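/* Add one metadata segment (magic, version, chipset and entry table) at
 * the head of the list so it becomes the first segment of the ELF dump.
 */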
3588 seg = kcalloc(1, sizeof(*seg), GFP_KERNEL);
3589
3590 if (!seg) {
3591 cnss_pr_err("%s: Failed to allocate mem for host dump seg\n",
3592 __func__);
3593 goto skip_host_dump;
3594 }
3595
3596 meta_info.magic = CNSS_RAMDUMP_MAGIC;
3597 meta_info.version = CNSS_RAMDUMP_VERSION;
3598 meta_info.chipset = plat_priv->device_id;
3599 meta_info.total_entries = num_entries_loaded;
3600 seg->va = &meta_info;
3601 seg->da = (dma_addr_t)&meta_info;
3602 seg->size = sizeof(meta_info);
3603 list_add(&seg->node, &head);
3604
3605 ret = qcom_elf_dump(&head, new_device, ELF_CLASS);
3606
3607 skip_host_dump:
3608 while (!list_empty(&head)) {
3609 seg = list_first_entry(&head, struct qcom_dump_segment, node);
3610 list_del(&seg->node);
3611 kfree(seg);
3612 }
3613 device_del(new_device);
3614 put_device:
3615 put_device(new_device);
3616 cnss_pr_dbg("host ramdump result %d\n", ret);
3617 return ret;
3618 }
3619 #endif
3620 #endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
3621
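/* Ramdump registration comes in two flavors selected per device ID in
 * cnss_register_ramdump(): the v1 path (legacy QCA6174) pre-allocates a DMA
 * coherent ramdump buffer and registers it in the APPS dump table, while the
 * v2 path only registers a dump descriptor whose segment entries are filled
 * in elsewhere when a dump is actually collected.
 */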
3622 #if IS_ENABLED(CONFIG_QCOM_MEMORY_DUMP_V2)
3623 static int cnss_init_dump_entry(struct cnss_plat_data *plat_priv)
3624 {
3625 struct cnss_ramdump_info *ramdump_info;
3626 struct msm_dump_entry dump_entry;
3627
3628 ramdump_info = &plat_priv->ramdump_info;
3629 ramdump_info->dump_data.addr = ramdump_info->ramdump_pa;
3630 ramdump_info->dump_data.len = ramdump_info->ramdump_size;
3631 ramdump_info->dump_data.version = CNSS_DUMP_FORMAT_VER;
3632 ramdump_info->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
3633 strlcpy(ramdump_info->dump_data.name, CNSS_DUMP_NAME,
3634 sizeof(ramdump_info->dump_data.name));
3635 dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
3636 dump_entry.addr = virt_to_phys(&ramdump_info->dump_data);
3637
3638 return msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
3639 &dump_entry);
3640 }
3641
3642 static int cnss_register_ramdump_v1(struct cnss_plat_data *plat_priv)
3643 {
3644 int ret = 0;
3645 struct device *dev;
3646 struct cnss_ramdump_info *ramdump_info;
3647 u32 ramdump_size = 0;
3648
3649 dev = &plat_priv->plat_dev->dev;
3650 ramdump_info = &plat_priv->ramdump_info;
3651
3652 if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG) {
3653 /* dt type: legacy or converged */
3654 ret = of_property_read_u32(dev->of_node,
3655 "qcom,wlan-ramdump-dynamic",
3656 &ramdump_size);
3657 } else {
3658 ret = of_property_read_u32(plat_priv->dev_node,
3659 "qcom,wlan-ramdump-dynamic",
3660 &ramdump_size);
3661 }
3662 if (ret == 0) {
3663 ramdump_info->ramdump_va =
3664 dma_alloc_coherent(dev, ramdump_size,
3665 &ramdump_info->ramdump_pa,
3666 GFP_KERNEL);
3667
3668 if (ramdump_info->ramdump_va)
3669 ramdump_info->ramdump_size = ramdump_size;
3670 }
3671
3672 cnss_pr_dbg("ramdump va: %pK, pa: %pa\n",
3673 ramdump_info->ramdump_va, &ramdump_info->ramdump_pa);
3674
3675 if (ramdump_info->ramdump_size == 0) {
3676 cnss_pr_info("Ramdump will not be collected");
3677 goto out;
3678 }
3679
3680 ret = cnss_init_dump_entry(plat_priv);
3681 if (ret) {
3682 cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
3683 goto free_ramdump;
3684 }
3685
3686 ramdump_info->ramdump_dev = cnss_create_ramdump_device(plat_priv);
3687 if (!ramdump_info->ramdump_dev) {
3688 cnss_pr_err("Failed to create ramdump device!");
3689 ret = -ENOMEM;
3690 goto free_ramdump;
3691 }
3692
3693 return 0;
3694 free_ramdump:
3695 dma_free_coherent(dev, ramdump_info->ramdump_size,
3696 ramdump_info->ramdump_va, ramdump_info->ramdump_pa);
3697 out:
3698 return ret;
3699 }
3700
3701 static void cnss_unregister_ramdump_v1(struct cnss_plat_data *plat_priv)
3702 {
3703 struct device *dev;
3704 struct cnss_ramdump_info *ramdump_info;
3705
3706 dev = &plat_priv->plat_dev->dev;
3707 ramdump_info = &plat_priv->ramdump_info;
3708
3709 if (ramdump_info->ramdump_dev)
3710 cnss_destroy_ramdump_device(plat_priv,
3711 ramdump_info->ramdump_dev);
3712
3713 if (ramdump_info->ramdump_va)
3714 dma_free_coherent(dev, ramdump_info->ramdump_size,
3715 ramdump_info->ramdump_va,
3716 ramdump_info->ramdump_pa);
3717 }
3718
3719 /**
3720 * cnss_ignore_dump_data_reg_fail - Ignore Ramdump table register failure
3721 * @ret: Error returned by msm_dump_data_register_nominidump
3722 *
3723 * For Lahaina GKI boot, the mem dump feature is not supported, so the
3724 * registration failure can be ignored.
3725 *
3726 * Return: The given error code if the mem dump feature is enabled, 0 otherwise
3727 */
3728 static int cnss_ignore_dump_data_reg_fail(int ret)
3729 {
3730 return ret;
3731 }
3732
3733 static int cnss_register_ramdump_v2(struct cnss_plat_data *plat_priv)
3734 {
3735 int ret = 0;
3736 struct cnss_ramdump_info_v2 *info_v2;
3737 struct cnss_dump_data *dump_data;
3738 struct msm_dump_entry dump_entry;
3739 struct device *dev = &plat_priv->plat_dev->dev;
3740 u32 ramdump_size = 0;
3741
3742 info_v2 = &plat_priv->ramdump_info_v2;
3743 dump_data = &info_v2->dump_data;
3744
3745 if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG) {
3746 /* dt type: legacy or converged */
3747 ret = of_property_read_u32(dev->of_node,
3748 "qcom,wlan-ramdump-dynamic",
3749 &ramdump_size);
3750 } else {
3751 ret = of_property_read_u32(plat_priv->dev_node,
3752 "qcom,wlan-ramdump-dynamic",
3753 &ramdump_size);
3754 }
3755 if (ret == 0)
3756 info_v2->ramdump_size = ramdump_size;
3757
3758 cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size);
3759
3760 info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL);
3761 if (!info_v2->dump_data_vaddr)
3762 return -ENOMEM;
3763
3764 dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr);
3765 dump_data->version = CNSS_DUMP_FORMAT_VER_V2;
3766 dump_data->magic = CNSS_DUMP_MAGIC_VER_V2;
3767 dump_data->seg_version = CNSS_DUMP_SEG_VER;
3768 strlcpy(dump_data->name, CNSS_DUMP_NAME,
3769 sizeof(dump_data->name));
3770 dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
3771 dump_entry.addr = virt_to_phys(dump_data);
3772
3773 ret = msm_dump_data_register_nominidump(MSM_DUMP_TABLE_APPS,
3774 &dump_entry);
3775 if (ret) {
3776 ret = cnss_ignore_dump_data_reg_fail(ret);
3777 cnss_pr_err("Failed to setup dump table, %s (%d)\n",
3778 ret ? "Error" : "Ignoring", ret);
3779 goto free_ramdump;
3780 }
3781
3782 info_v2->ramdump_dev = cnss_create_ramdump_device(plat_priv);
3783 if (!info_v2->ramdump_dev) {
3784 cnss_pr_err("Failed to create ramdump device!\n");
3785 ret = -ENOMEM;
3786 goto free_ramdump;
3787 }
3788
3789 return 0;
3790
3791 free_ramdump:
3792 kfree(info_v2->dump_data_vaddr);
3793 info_v2->dump_data_vaddr = NULL;
3794 return ret;
3795 }
3796
3797 static void cnss_unregister_ramdump_v2(struct cnss_plat_data *plat_priv)
3798 {
3799 struct cnss_ramdump_info_v2 *info_v2;
3800
3801 info_v2 = &plat_priv->ramdump_info_v2;
3802
3803 if (info_v2->ramdump_dev)
3804 cnss_destroy_ramdump_device(plat_priv, info_v2->ramdump_dev);
3805
3806 kfree(info_v2->dump_data_vaddr);
3807 info_v2->dump_data_vaddr = NULL;
3808 info_v2->dump_data_valid = false;
3809 }
3810
3811 int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
3812 {
3813 int ret = 0;
3814
3815 switch (plat_priv->device_id) {
3816 case QCA6174_DEVICE_ID:
3817 ret = cnss_register_ramdump_v1(plat_priv);
3818 break;
3819 case QCA6290_DEVICE_ID:
3820 case QCA6390_DEVICE_ID:
3821 case QCN7605_DEVICE_ID:
3822 case QCA6490_DEVICE_ID:
3823 case KIWI_DEVICE_ID:
3824 case MANGO_DEVICE_ID:
3825 case PEACH_DEVICE_ID:
3826 ret = cnss_register_ramdump_v2(plat_priv);
3827 break;
3828 default:
3829 cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
3830 ret = -ENODEV;
3831 break;
3832 }
3833 return ret;
3834 }
3835
3836 void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
3837 {
3838 switch (plat_priv->device_id) {
3839 case QCA6174_DEVICE_ID:
3840 cnss_unregister_ramdump_v1(plat_priv);
3841 break;
3842 case QCA6290_DEVICE_ID:
3843 case QCA6390_DEVICE_ID:
3844 case QCN7605_DEVICE_ID:
3845 case QCA6490_DEVICE_ID:
3846 case KIWI_DEVICE_ID:
3847 case MANGO_DEVICE_ID:
3848 case PEACH_DEVICE_ID:
3849 cnss_unregister_ramdump_v2(plat_priv);
3850 break;
3851 default:
3852 cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
3853 break;
3854 }
3855 }
3856 #else
3857 int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
3858 {
3859 struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3860 struct cnss_dump_data *dump_data = &info_v2->dump_data;
3861 struct device *dev = &plat_priv->plat_dev->dev;
3862 u32 ramdump_size = 0;
3863
3864 if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
3865 &ramdump_size) == 0)
3866 info_v2->ramdump_size = ramdump_size;
3867
3868 cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size);
3869
3870 info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL);
3871 if (!info_v2->dump_data_vaddr)
3872 return -ENOMEM;
3873
3874 dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr);
3875 dump_data->version = CNSS_DUMP_FORMAT_VER_V2;
3876 dump_data->magic = CNSS_DUMP_MAGIC_VER_V2;
3877 dump_data->seg_version = CNSS_DUMP_SEG_VER;
3878 strlcpy(dump_data->name, CNSS_DUMP_NAME,
3879 sizeof(dump_data->name));
3880
3881 info_v2->ramdump_dev = dev;
3882
3883 return 0;
3884 }
3885
3886 void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
3887 {
3888 struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
3889
3890 info_v2->ramdump_dev = NULL;
3891 kfree(info_v2->dump_data_vaddr);
3892 info_v2->dump_data_vaddr = NULL;
3893 info_v2->dump_data_valid = false;
3894 }
3895 #endif /* CONFIG_QCOM_MEMORY_DUMP_V2 */
3896
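/* Translate a dma_alloc_coherent() buffer (VA plus DMA handle) into a
 * physical address by retrieving its scatter-gather table. Only the first
 * sgl entry is used, which assumes the coherent allocation is physically
 * contiguous.
 */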
3897 int cnss_va_to_pa(struct device *dev, size_t size, void *va, dma_addr_t dma,
3898 phys_addr_t *pa, unsigned long attrs)
3899 {
3900 struct sg_table sgt;
3901 int ret;
3902
3903 ret = dma_get_sgtable_attrs(dev, &sgt, va, dma, size, attrs);
3904 if (ret) {
3905 cnss_pr_err("Failed to get sgtable for va: 0x%pK, dma: %pa, size: 0x%zx, attrs: 0x%x\n",
3906 va, &dma, size, attrs);
3907 return -EINVAL;
3908 }
3909
3910 *pa = page_to_phys(sg_page(sgt.sgl));
3911 sg_free_table(&sgt);
3912
3913 return 0;
3914 }
3915
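/* Minidump regions are named by dump type and segment number (FBC_<n> for
 * the FW image, RDDM_<n> for RDDM dumps, RHEAP_<n> for the remote heap) and
 * tagged with the CNSS WLAN dump ID, presumably so the minidump tooling can
 * group them together.
 */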
3916 #if IS_ENABLED(CONFIG_QCOM_MINIDUMP)
3917 int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
3918 enum cnss_fw_dump_type type, int seg_no,
3919 void *va, phys_addr_t pa, size_t size)
3920 {
3921 struct md_region md_entry;
3922 int ret;
3923
3924 switch (type) {
3925 case CNSS_FW_IMAGE:
3926 snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
3927 seg_no);
3928 break;
3929 case CNSS_FW_RDDM:
3930 snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
3931 seg_no);
3932 break;
3933 case CNSS_FW_REMOTE_HEAP:
3934 snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
3935 seg_no);
3936 break;
3937 default:
3938 cnss_pr_err("Unknown dump type ID: %d\n", type);
3939 return -EINVAL;
3940 }
3941
3942 md_entry.phys_addr = pa;
3943 md_entry.virt_addr = (uintptr_t)va;
3944 md_entry.size = size;
3945 md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
3946
3947 cnss_pr_dbg("Mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
3948 md_entry.name, va, &pa, size);
3949
3950 ret = msm_minidump_add_region(&md_entry);
3951 if (ret < 0)
3952 cnss_pr_err("Failed to add mini dump region, err = %d\n", ret);
3953
3954 return ret;
3955 }
3956
3957 int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
3958 enum cnss_fw_dump_type type, int seg_no,
3959 void *va, phys_addr_t pa, size_t size)
3960 {
3961 struct md_region md_entry;
3962 int ret;
3963
3964 switch (type) {
3965 case CNSS_FW_IMAGE:
3966 snprintf(md_entry.name, sizeof(md_entry.name), "FBC_%X",
3967 seg_no);
3968 break;
3969 case CNSS_FW_RDDM:
3970 snprintf(md_entry.name, sizeof(md_entry.name), "RDDM_%X",
3971 seg_no);
3972 break;
3973 case CNSS_FW_REMOTE_HEAP:
3974 snprintf(md_entry.name, sizeof(md_entry.name), "RHEAP_%X",
3975 seg_no);
3976 break;
3977 default:
3978 cnss_pr_err("Unknown dump type ID: %d\n", type);
3979 return -EINVAL;
3980 }
3981
3982 md_entry.phys_addr = pa;
3983 md_entry.virt_addr = (uintptr_t)va;
3984 md_entry.size = size;
3985 md_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
3986
3987 cnss_pr_vdbg("Remove mini dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
3988 md_entry.name, va, &pa, size);
3989
3990 ret = msm_minidump_remove_region(&md_entry);
3991 if (ret)
3992 cnss_pr_err("Failed to remove mini dump region, err = %d\n",
3993 ret);
3994
3995 return ret;
3996 }
3997 #else
3998 int cnss_minidump_add_region(struct cnss_plat_data *plat_priv,
3999 enum cnss_fw_dump_type type, int seg_no,
4000 void *va, phys_addr_t pa, size_t size)
4001 {
4002 char name[MAX_NAME_LEN];
4003
4004 switch (type) {
4005 case CNSS_FW_IMAGE:
4006 snprintf(name, MAX_NAME_LEN, "FBC_%X", seg_no);
4007 break;
4008 case CNSS_FW_RDDM:
4009 snprintf(name, MAX_NAME_LEN, "RDDM_%X", seg_no);
4010 break;
4011 case CNSS_FW_REMOTE_HEAP:
4012 snprintf(name, MAX_NAME_LEN, "RHEAP_%X", seg_no);
4013 break;
4014 default:
4015 cnss_pr_err("Unknown dump type ID: %d\n", type);
4016 return -EINVAL;
4017 }
4018
4019 cnss_pr_dbg("Dump region: %s, va: %pK, pa: %pa, size: 0x%zx\n",
4020 name, va, &pa, size);
4021 return 0;
4022 }
4023
4024 int cnss_minidump_remove_region(struct cnss_plat_data *plat_priv,
4025 enum cnss_fw_dump_type type, int seg_no,
4026 void *va, phys_addr_t pa, size_t size)
4027 {
4028 return 0;
4029 }
4030 #endif /* CONFIG_QCOM_MINIDUMP */
4031
4032 int cnss_request_firmware_direct(struct cnss_plat_data *plat_priv,
4033 const struct firmware **fw_entry,
4034 const char *filename)
4035 {
4036 if (IS_ENABLED(CONFIG_CNSS_REQ_FW_DIRECT))
4037 return request_firmware_direct(fw_entry, filename,
4038 &plat_priv->plat_dev->dev);
4039 else
4040 return firmware_request_nowarn(fw_entry, filename,
4041 &plat_priv->plat_dev->dev);
4042 }
4043
4044 #if IS_ENABLED(CONFIG_INTERCONNECT)
4045 /**
4046 * cnss_register_bus_scale() - Setup interconnect voting data
4047 * @plat_priv: Platform data structure
4048 *
4049 * For each interconnect path configured in the device tree, set up voting
4050 * data for the list of bandwidth requirements.
4051 *
4052 * Return: 0 for success, -EINVAL if not configured
4053 */
4054 static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
4055 {
4056 int ret = -EINVAL;
4057 u32 idx, i, j, cfg_arr_size, *cfg_arr = NULL;
4058 struct cnss_bus_bw_info *bus_bw_info, *tmp;
4059 struct device *dev = &plat_priv->plat_dev->dev;
4060
4061 INIT_LIST_HEAD(&plat_priv->icc.list_head);
4062 ret = of_property_read_u32(dev->of_node,
4063 "qcom,icc-path-count",
4064 &plat_priv->icc.path_count);
4065 if (ret) {
4066 cnss_pr_dbg("Platform Bus Interconnect path not configured\n");
4067 return 0;
4068 }
4069 ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
4070 "qcom,bus-bw-cfg-count",
4071 &plat_priv->icc.bus_bw_cfg_count);
4072 if (ret) {
4073 cnss_pr_err("Failed to get Bus BW Config table size\n");
4074 goto cleanup;
4075 }
4076 cfg_arr_size = plat_priv->icc.path_count *
4077 plat_priv->icc.bus_bw_cfg_count * CNSS_ICC_VOTE_MAX;
4078 cfg_arr = kcalloc(cfg_arr_size, sizeof(*cfg_arr), GFP_KERNEL);
4079 if (!cfg_arr) {
4080 cnss_pr_err("Failed to alloc cfg table mem\n");
4081 ret = -ENOMEM;
4082 goto cleanup;
4083 }
4084
4085 ret = of_property_read_u32_array(plat_priv->plat_dev->dev.of_node,
4086 "qcom,bus-bw-cfg", cfg_arr,
4087 cfg_arr_size);
4088 if (ret) {
4089 cnss_pr_err("Invalid Bus BW Config Table\n");
4090 goto cleanup;
4091 }
4092
4093 cnss_pr_dbg("ICC Path_Count: %d BW_CFG_Count: %d\n",
4094 plat_priv->icc.path_count, plat_priv->icc.bus_bw_cfg_count);
4095
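/* "qcom,bus-bw-cfg" is a flat u32 array laid out as
 * [path][bw_level][avg_bw, peak_bw]; the "j += 2" stride below assumes
 * CNSS_ICC_VOTE_MAX is 2 (one average and one peak vote per level).
 */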
4096 for (idx = 0; idx < plat_priv->icc.path_count; idx++) {
4097 bus_bw_info = devm_kzalloc(dev, sizeof(*bus_bw_info),
4098 GFP_KERNEL);
4099 if (!bus_bw_info) {
4100 ret = -ENOMEM;
4101 goto out;
4102 }
4103 ret = of_property_read_string_index(dev->of_node,
4104 "interconnect-names", idx,
4105 &bus_bw_info->icc_name);
4106 if (ret)
4107 goto out;
4108
4109 bus_bw_info->icc_path =
4110 of_icc_get(&plat_priv->plat_dev->dev,
4111 bus_bw_info->icc_name);
4112
4113 if (IS_ERR(bus_bw_info->icc_path)) {
4114 ret = PTR_ERR(bus_bw_info->icc_path);
4115 if (ret != -EPROBE_DEFER) {
4116 cnss_pr_err("Failed to get Interconnect path for %s. Err: %d\n",
4117 bus_bw_info->icc_name, ret);
4118 goto out;
4119 }
4120 }
4121
4122 bus_bw_info->cfg_table =
4123 devm_kcalloc(dev, plat_priv->icc.bus_bw_cfg_count,
4124 sizeof(*bus_bw_info->cfg_table),
4125 GFP_KERNEL);
4126 if (!bus_bw_info->cfg_table) {
4127 ret = -ENOMEM;
4128 goto out;
4129 }
4130 cnss_pr_dbg("ICC Vote CFG for path: %s\n",
4131 bus_bw_info->icc_name);
4132 for (i = 0, j = (idx * plat_priv->icc.bus_bw_cfg_count *
4133 CNSS_ICC_VOTE_MAX);
4134 i < plat_priv->icc.bus_bw_cfg_count;
4135 i++, j += 2) {
4136 bus_bw_info->cfg_table[i].avg_bw = cfg_arr[j];
4137 bus_bw_info->cfg_table[i].peak_bw = cfg_arr[j + 1];
4138 cnss_pr_dbg("ICC Vote BW: %d avg: %d peak: %d\n",
4139 i, bus_bw_info->cfg_table[i].avg_bw,
4140 bus_bw_info->cfg_table[i].peak_bw);
4141 }
4142 list_add_tail(&bus_bw_info->list,
4143 &plat_priv->icc.list_head);
4144 }
4145 kfree(cfg_arr);
4146 return 0;
4147 out:
4148 list_for_each_entry_safe(bus_bw_info, tmp,
4149 &plat_priv->icc.list_head, list) {
4150 list_del(&bus_bw_info->list);
4151 }
4152 cleanup:
4153 kfree(cfg_arr);
4154 memset(&plat_priv->icc, 0, sizeof(plat_priv->icc));
4155 return ret;
4156 }
4157
4158 static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv)
4159 {
4160 struct cnss_bus_bw_info *bus_bw_info, *tmp;
4161
4162 list_for_each_entry_safe(bus_bw_info, tmp,
4163 &plat_priv->icc.list_head, list) {
4164 list_del(&bus_bw_info->list);
4165 if (bus_bw_info->icc_path)
4166 icc_put(bus_bw_info->icc_path);
4167 }
4168 memset(&plat_priv->icc, 0, sizeof(plat_priv->icc));
4169 }
4170 #else
4171 static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
4172 {
4173 return 0;
4174 }
4175
4176 static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv) {}
4177 #endif /* CONFIG_INTERCONNECT */
4178
4179 void cnss_daemon_connection_update_cb(void *cb_ctx, bool status)
4180 {
4181 struct cnss_plat_data *plat_priv = cb_ctx;
4182
4183 if (!plat_priv) {
4184 cnss_pr_err("%s: Invalid context\n", __func__);
4185 return;
4186 }
4187 if (status) {
4188 cnss_pr_info("CNSS Daemon connected\n");
4189 set_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state);
4190 complete(&plat_priv->daemon_connected);
4191 } else {
4192 cnss_pr_info("CNSS Daemon disconnected\n");
4193 reinit_completion(&plat_priv->daemon_connected);
4194 clear_bit(CNSS_DAEMON_CONNECTED, &plat_priv->driver_state);
4195 }
4196 }
4197
4198 static ssize_t enable_hds_store(struct device *dev,
4199 struct device_attribute *attr,
4200 const char *buf, size_t count)
4201 {
4202 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4203 unsigned int enable_hds = 0;
4204
4205 if (!plat_priv)
4206 return -ENODEV;
4207
4208 if (sscanf(buf, "%du", &enable_hds) != 1) {
4209 cnss_pr_err("Invalid enable_hds sysfs command\n");
4210 return -EINVAL;
4211 }
4212
4213 if (enable_hds)
4214 plat_priv->hds_enabled = true;
4215 else
4216 plat_priv->hds_enabled = false;
4217
4218 cnss_pr_dbg("%s HDS file download, count is %zu\n",
4219 plat_priv->hds_enabled ? "Enable" : "Disable", count);
4220
4221 return count;
4222 }
4223
4224 static ssize_t recovery_show(struct device *dev,
4225 struct device_attribute *attr,
4226 char *buf)
4227 {
4228 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4229 u32 buf_size = PAGE_SIZE;
4230 u32 curr_len = 0;
4231 u32 buf_written = 0;
4232
4233 if (!plat_priv)
4234 return -ENODEV;
4235
4236 buf_written = scnprintf(buf, buf_size,
4237 "Usage: echo [recovery_bitmap] > /sys/kernel/cnss/recovery\n"
4238 "BIT0 -- wlan fw recovery\n"
4239 "BIT1 -- wlan pcss recovery\n"
4240 "---------------------------------\n");
4241 curr_len += buf_written;
4242
4243 buf_written = scnprintf(buf + curr_len, buf_size - curr_len,
4244 "WLAN recovery %s[%d]\n",
4245 plat_priv->recovery_enabled ? "Enabled" : "Disabled",
4246 plat_priv->recovery_enabled);
4247 curr_len += buf_written;
4248
4249 buf_written = scnprintf(buf + curr_len, buf_size - curr_len,
4250 "WLAN PCSS recovery %s[%d]\n",
4251 plat_priv->recovery_pcss_enabled ? "Enabled" : "Disabled",
4252 plat_priv->recovery_pcss_enabled);
4253 curr_len += buf_written;
4254
4255 /*
4256 * The current value of curr_len is guaranteed not to exceed the page
4257 * size. If a new item or a non-fixed-size item is added later, add a
4258 * check to ensure curr_len stays within the page size.
4259 */
4260 return curr_len;
4261 }
4262
4263 static ssize_t tme_opt_file_download_show(struct device *dev,
4264 struct device_attribute *attr, char *buf)
4265 {
4266 u32 buf_size = PAGE_SIZE;
4267 u32 curr_len = 0;
4268 u32 buf_written = 0;
4269
4270 buf_written = scnprintf(buf, buf_size,
4271 "Usage: echo [file_type] > /sys/kernel/cnss/tme_opt_file_download\n"
4272 "file_type = sec -- For OEM_FUSE file\n"
4273 "file_type = rpr -- For RPR file\n"
4274 "file_type = dpr -- For DPR file\n");
4275
4276 curr_len += buf_written;
4277 return curr_len;
4278 }
4279
4280 static ssize_t time_sync_period_show(struct device *dev,
4281 struct device_attribute *attr,
4282 char *buf)
4283 {
4284 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4285
4286 return scnprintf(buf, PAGE_SIZE, "%u ms\n",
4287 plat_priv->ctrl_params.time_sync_period);
4288 }
4289
4290 /**
4291 * cnss_get_min_time_sync_period_by_vote() - Get minimum time sync period
4292 * @plat_priv: Platform data structure
4293 *
4294 * Return: Minimum time sync period among the votes from wlan and sys config
4295 */
4296 uint32_t cnss_get_min_time_sync_period_by_vote(struct cnss_plat_data *plat_priv)
4297 {
4298 unsigned int i, min_time_sync_period = CNSS_TIME_SYNC_PERIOD_INVALID;
4299 unsigned int time_sync_period;
4300
4301 for (i = 0; i < TIME_SYNC_VOTE_MAX; i++) {
4302 time_sync_period = plat_priv->ctrl_params.time_sync_period_vote[i];
4303 if (min_time_sync_period > time_sync_period)
4304 min_time_sync_period = time_sync_period;
4305 }
4306
4307 return min_time_sync_period;
4308 }
4309
4310 static ssize_t time_sync_period_store(struct device *dev,
4311 struct device_attribute *attr,
4312 const char *buf, size_t count)
4313 {
4314 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4315 unsigned int time_sync_period = 0;
4316
4317 if (!plat_priv)
4318 return -ENODEV;
4319
4320 if (sscanf(buf, "%du", &time_sync_period) != 1) {
4321 cnss_pr_err("Invalid time sync sysfs command\n");
4322 return -EINVAL;
4323 }
4324
4325 if (time_sync_period < CNSS_MIN_TIME_SYNC_PERIOD) {
4326 cnss_pr_err("Invalid time sync value\n");
4327 return -EINVAL;
4328 }
4329 plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_CNSS] =
4330 time_sync_period;
4331 time_sync_period = cnss_get_min_time_sync_period_by_vote(plat_priv);
4332
4333 if (time_sync_period == CNSS_TIME_SYNC_PERIOD_INVALID) {
4334 cnss_pr_err("Invalid min time sync value\n");
4335 return -EINVAL;
4336 }
4337
4338 cnss_bus_update_time_sync_period(plat_priv, time_sync_period);
4339
4340 return count;
4341 }
4342
4343 /**
4344 * cnss_update_time_sync_period() - Set time sync period given by driver
4345 * @dev: device structure
4346 * @time_sync_period: time sync period value
4347 *
4348 * Update the driver's time sync period vote and apply the minimum of the
4349 * stored wlan and sys config votes.
4350 * Return: 0 for success, error for an invalid value or missing device
4351 */
4352 int cnss_update_time_sync_period(struct device *dev, uint32_t time_sync_period)
4353 {
4354 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4355
4356 if (!plat_priv)
4357 return -ENODEV;
4358
4359 if (time_sync_period < CNSS_MIN_TIME_SYNC_PERIOD) {
4360 cnss_pr_err("Invalid time sync value\n");
4361 return -EINVAL;
4362 }
4363
4364 plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_WLAN] =
4365 time_sync_period;
4366 time_sync_period = cnss_get_min_time_sync_period_by_vote(plat_priv);
4367
4368 if (time_sync_period == CNSS_TIME_SYNC_PERIOD_INVALID) {
4369 cnss_pr_err("Invalid min time sync value\n");
4370 return -EINVAL;
4371 }
4372
4373 cnss_bus_update_time_sync_period(plat_priv, time_sync_period);
4374 return 0;
4375 }
4376 EXPORT_SYMBOL(cnss_update_time_sync_period);
4377
4378 /**
4379 * cnss_reset_time_sync_period() - Reset time sync period
4380 * @dev: device structure
4381 *
4382 * Mark the driver's time sync period vote as invalid and re-apply the
4383 * minimum time sync period from the remaining stored votes (wlan and
4384 * sys config).
4385 * Return: 0 for success, error in case of no device
4386 */
4387 int cnss_reset_time_sync_period(struct device *dev)
4388 {
4389 struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
4390 unsigned int time_sync_period = 0;
4391
4392 if (!plat_priv)
4393 return -ENODEV;
4394
4395 /* The driver vote is set to invalid on reset, so the only
4396 * remaining valid vote to check is the sys config one.
4397 */
4398 plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_WLAN] =
4399 CNSS_TIME_SYNC_PERIOD_INVALID;
4400 time_sync_period = cnss_get_min_time_sync_period_by_vote(plat_priv);
4401
4402 if (time_sync_period == CNSS_TIME_SYNC_PERIOD_INVALID) {
4403 cnss_pr_err("Invalid min time sync value\n");
4404 return -EINVAL;
4405 }
4406
4407 cnss_bus_update_time_sync_period(plat_priv, time_sync_period);
4408
4409 return 0;
4410 }
4411 EXPORT_SYMBOL(cnss_reset_time_sync_period);
4412
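/* Example, matching the usage text printed by recovery_show() above:
 *   echo 3 > /sys/kernel/cnss/recovery
 * enables both WLAN FW recovery (BIT0) and PCSS recovery (BIT1).
 */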
4413 static ssize_t recovery_store(struct device *dev,
4414 struct device_attribute *attr,
4415 const char *buf, size_t count)
4416 {
4417 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4418 unsigned int recovery = 0;
4419
4420 if (!plat_priv)
4421 return -ENODEV;
4422
4423 if (sscanf(buf, "%du", &recovery) != 1) {
4424 cnss_pr_err("Invalid recovery sysfs command\n");
4425 return -EINVAL;
4426 }
4427
4428 plat_priv->recovery_enabled = !!(recovery & CNSS_WLAN_RECOVERY);
4429 plat_priv->recovery_pcss_enabled = !!(recovery & CNSS_PCSS_RECOVERY);
4430
4431 cnss_pr_dbg("%s WLAN recovery, count is %zu\n",
4432 plat_priv->recovery_enabled ? "Enable" : "Disable", count);
4433 cnss_pr_dbg("%s PCSS recovery, count is %zu\n",
4434 plat_priv->recovery_pcss_enabled ? "Enable" : "Disable", count);
4435
4436 cnss_send_subsys_restart_level_msg(plat_priv);
4437 return count;
4438 }
4439
4440 static ssize_t shutdown_store(struct device *dev,
4441 struct device_attribute *attr,
4442 const char *buf, size_t count)
4443 {
4444 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4445
4446 cnss_pr_dbg("Received shutdown notification\n");
4447 if (plat_priv) {
4448 set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
4449 cnss_bus_update_status(plat_priv, CNSS_SYS_REBOOT);
4450 del_timer(&plat_priv->fw_boot_timer);
4451 complete_all(&plat_priv->power_up_complete);
4452 complete_all(&plat_priv->cal_complete);
4453 cnss_pr_dbg("Shutdown notification handled\n");
4454 }
4455
4456 return count;
4457 }
4458
4459 static ssize_t fs_ready_store(struct device *dev,
4460 struct device_attribute *attr,
4461 const char *buf, size_t count)
4462 {
4463 int fs_ready = 0;
4464 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4465
4466 if (sscanf(buf, "%du", &fs_ready) != 1)
4467 return -EINVAL;
4468
4469 cnss_pr_dbg("File system is ready, fs_ready is %d, count is %zu\n",
4470 fs_ready, count);
4471
4472 if (!plat_priv) {
4473 cnss_pr_err("plat_priv is NULL\n");
4474 return count;
4475 }
4476
4477 if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
4478 cnss_pr_dbg("QMI is bypassed\n");
4479 return count;
4480 }
4481
4482 set_bit(CNSS_FS_READY, &plat_priv->driver_state);
4483 if (fs_ready == FILE_SYSTEM_READY && plat_priv->cbc_enabled) {
4484 cnss_driver_event_post(plat_priv,
4485 CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
4486 0, NULL);
4487 }
4488
4489 return count;
4490 }
4491
4492 static ssize_t qdss_trace_start_store(struct device *dev,
4493 struct device_attribute *attr,
4494 const char *buf, size_t count)
4495 {
4496 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4497
4498 wlfw_qdss_trace_start(plat_priv);
4499 cnss_pr_dbg("Received QDSS start command\n");
4500 return count;
4501 }
4502
4503 static ssize_t qdss_trace_stop_store(struct device *dev,
4504 struct device_attribute *attr,
4505 const char *buf, size_t count)
4506 {
4507 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4508 u32 option = 0;
4509
4510 if (sscanf(buf, "%du", &option) != 1)
4511 return -EINVAL;
4512
4513 wlfw_qdss_trace_stop(plat_priv, option);
4514 cnss_pr_dbg("Received QDSS stop command\n");
4515 return count;
4516 }
4517
4518 static ssize_t qdss_conf_download_store(struct device *dev,
4519 struct device_attribute *attr,
4520 const char *buf, size_t count)
4521 {
4522 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4523
4524 cnss_wlfw_qdss_dnld_send_sync(plat_priv);
4525 cnss_pr_dbg("Received QDSS download config command\n");
4526 return count;
4527 }
4528
4529 static ssize_t tme_opt_file_download_store(struct device *dev,
4530 struct device_attribute *attr,
4531 const char *buf, size_t count)
4532 {
4533 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4534 char cmd[5];
4535
4536 if (sscanf(buf, "%s", cmd) != 1)
4537 return -EINVAL;
4538
4539 if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
4540 cnss_pr_err("Firmware is not ready yet\n");
4541 return 0;
4542 }
4543
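/* For PEACH, hold a runtime PM reference so the bus stays active during
 * the file download. The error path still jumps to runtime_pm_put on the
 * assumption that the get_sync-style helper leaves the usage count
 * incremented even on failure, as is usual for runtime PM.
 */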
4544 if (plat_priv->device_id == PEACH_DEVICE_ID &&
4545 cnss_bus_runtime_pm_get_sync(plat_priv) < 0)
4546 goto runtime_pm_put;
4547
4548 if (strcmp(cmd, "sec") == 0) {
4549 cnss_bus_load_tme_opt_file(plat_priv, WLFW_TME_LITE_OEM_FUSE_FILE_V01);
4550 cnss_wlfw_tme_opt_file_dnld_send_sync(plat_priv, WLFW_TME_LITE_OEM_FUSE_FILE_V01);
4551 } else if (strcmp(cmd, "rpr") == 0) {
4552 cnss_bus_load_tme_opt_file(plat_priv, WLFW_TME_LITE_RPR_FILE_V01);
4553 cnss_wlfw_tme_opt_file_dnld_send_sync(plat_priv, WLFW_TME_LITE_RPR_FILE_V01);
4554 } else if (strcmp(cmd, "dpr") == 0) {
4555 cnss_bus_load_tme_opt_file(plat_priv, WLFW_TME_LITE_DPR_FILE_V01);
4556 cnss_wlfw_tme_opt_file_dnld_send_sync(plat_priv, WLFW_TME_LITE_DPR_FILE_V01);
4557 }
4558
4559 cnss_pr_dbg("Received tme_opt_file_download indication cmd: %s\n", cmd);
4560
4561 runtime_pm_put:
4562 if (plat_priv->device_id == PEACH_DEVICE_ID)
4563 cnss_bus_runtime_pm_put(plat_priv);
4564 return count;
4565 }
4566
4567 static ssize_t hw_trace_override_store(struct device *dev,
4568 struct device_attribute *attr,
4569 const char *buf, size_t count)
4570 {
4571 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4572 int tmp = 0;
4573
4574 if (sscanf(buf, "%du", &tmp) != 1)
4575 return -EINVAL;
4576
4577 plat_priv->hw_trc_override = tmp;
4578 cnss_pr_dbg("Received QDSS hw_trc_override indication\n");
4579 return count;
4580 }
4581
4582 static ssize_t charger_mode_store(struct device *dev,
4583 struct device_attribute *attr,
4584 const char *buf, size_t count)
4585 {
4586 struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
4587 int tmp = 0;
4588
4589 if (sscanf(buf, "%du", &tmp) != 1)
4590 return -EINVAL;
4591
4592 plat_priv->charger_mode = tmp;
4593 cnss_pr_dbg("Received Charger Mode: %d\n", tmp);
4594 return count;
4595 }
4596
4597 static DEVICE_ATTR_WO(fs_ready);
4598 static DEVICE_ATTR_WO(shutdown);
4599 static DEVICE_ATTR_RW(recovery);
4600 static DEVICE_ATTR_WO(enable_hds);
4601 static DEVICE_ATTR_WO(qdss_trace_start);
4602 static DEVICE_ATTR_WO(qdss_trace_stop);
4603 static DEVICE_ATTR_WO(qdss_conf_download);
4604 static DEVICE_ATTR_RW(tme_opt_file_download);
4605 static DEVICE_ATTR_WO(hw_trace_override);
4606 static DEVICE_ATTR_WO(charger_mode);
4607 static DEVICE_ATTR_RW(time_sync_period);
4608
4609 static struct attribute *cnss_attrs[] = {
4610 &dev_attr_fs_ready.attr,
4611 &dev_attr_shutdown.attr,
4612 &dev_attr_recovery.attr,
4613 &dev_attr_enable_hds.attr,
4614 &dev_attr_qdss_trace_start.attr,
4615 &dev_attr_qdss_trace_stop.attr,
4616 &dev_attr_qdss_conf_download.attr,
4617 &dev_attr_tme_opt_file_download.attr,
4618 &dev_attr_hw_trace_override.attr,
4619 &dev_attr_charger_mode.attr,
4620 &dev_attr_time_sync_period.attr,
4621 NULL,
4622 };
4623
4624 static struct attribute_group cnss_attr_group = {
4625 .attrs = cnss_attrs,
4626 };
4627
4628 static int cnss_create_sysfs_link(struct cnss_plat_data *plat_priv)
4629 {
4630 struct device *dev = &plat_priv->plat_dev->dev;
4631 int ret;
4632 char cnss_name[CNSS_FS_NAME_SIZE];
4633 char shutdown_name[32];
4634
4635 if (cnss_is_dual_wlan_enabled()) {
4636 snprintf(cnss_name, CNSS_FS_NAME_SIZE,
4637 CNSS_FS_NAME "_%d", plat_priv->plat_idx);
4638 snprintf(shutdown_name, sizeof(shutdown_name),
4639 "shutdown_wlan_%d", plat_priv->plat_idx);
4640 } else {
4641 snprintf(cnss_name, CNSS_FS_NAME_SIZE, CNSS_FS_NAME);
4642 snprintf(shutdown_name, sizeof(shutdown_name),
4643 "shutdown_wlan");
4644 }
4645
4646 ret = sysfs_create_link(kernel_kobj, &dev->kobj, cnss_name);
4647 if (ret) {
4648 cnss_pr_err("Failed to create cnss link, err = %d\n",
4649 ret);
4650 goto out;
4651 }
4652
4653 /* This is only for backward compatibility. */
4654 ret = sysfs_create_link(kernel_kobj, &dev->kobj, shutdown_name);
4655 if (ret) {
4656 cnss_pr_err("Failed to create shutdown_wlan link, err = %d\n",
4657 ret);
4658 goto rm_cnss_link;
4659 }
4660
4661 return 0;
4662
4663 rm_cnss_link:
4664 sysfs_remove_link(kernel_kobj, cnss_name);
4665 out:
4666 return ret;
4667 }
4668
4669 static void cnss_remove_sysfs_link(struct cnss_plat_data *plat_priv)
4670 {
4671 char cnss_name[CNSS_FS_NAME_SIZE];
4672 char shutdown_name[32];
4673
4674 if (cnss_is_dual_wlan_enabled()) {
4675 snprintf(cnss_name, CNSS_FS_NAME_SIZE,
4676 CNSS_FS_NAME "_%d", plat_priv->plat_idx);
4677 snprintf(shutdown_name, sizeof(shutdown_name),
4678 "shutdown_wlan_%d", plat_priv->plat_idx);
4679 } else {
4680 snprintf(cnss_name, CNSS_FS_NAME_SIZE, CNSS_FS_NAME);
4681 snprintf(shutdown_name, sizeof(shutdown_name),
4682 "shutdown_wlan");
4683 }
4684
4685 sysfs_remove_link(kernel_kobj, shutdown_name);
4686 sysfs_remove_link(kernel_kobj, cnss_name);
4687 }
4688
4689 static int cnss_create_sysfs(struct cnss_plat_data *plat_priv)
4690 {
4691 int ret = 0;
4692
4693 ret = devm_device_add_group(&plat_priv->plat_dev->dev,
4694 &cnss_attr_group);
4695 if (ret) {
4696 cnss_pr_err("Failed to create cnss device group, err = %d\n",
4697 ret);
4698 goto out;
4699 }
4700
4701 cnss_create_sysfs_link(plat_priv);
4702
4703 return 0;
4704 out:
4705 return ret;
4706 }
4707
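/* On kernels >= 6.2, devm_device_remove_group() is not used here; the devres
 * entry created by devm_device_add_group() is released manually instead. The
 * union below is assumed to mirror the devres layout that
 * devm_device_add_group() uses internally on those kernels.
 */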
4708 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 2, 0))
4709 union cnss_device_group_devres {
4710 const struct attribute_group *group;
4711 };
4712
4713 static void devm_cnss_group_remove(struct device *dev, void *res)
4714 {
4715 union cnss_device_group_devres *devres = res;
4716 const struct attribute_group *group = devres->group;
4717
4718 cnss_pr_dbg("%s: removing group %p\n", __func__, group);
4719 sysfs_remove_group(&dev->kobj, group);
4720 }
4721
4722 static int devm_cnss_group_match(struct device *dev, void *res, void *data)
4723 {
4724 return ((union cnss_device_group_devres *)res)->group == data;
4725 }
4726
4727 static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv)
4728 {
4729 cnss_remove_sysfs_link(plat_priv);
4730 WARN_ON(devres_release(&plat_priv->plat_dev->dev,
4731 devm_cnss_group_remove, devm_cnss_group_match,
4732 (void *)&cnss_attr_group));
4733 }
4734 #else
4735 static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv)
4736 {
4737 cnss_remove_sysfs_link(plat_priv);
4738 devm_device_remove_group(&plat_priv->plat_dev->dev, &cnss_attr_group);
4739 }
4740 #endif
4741
4742 static int cnss_event_work_init(struct cnss_plat_data *plat_priv)
4743 {
4744 spin_lock_init(&plat_priv->event_lock);
4745 plat_priv->event_wq = alloc_workqueue("cnss_driver_event",
4746 WQ_UNBOUND, 1);
4747 if (!plat_priv->event_wq) {
4748 cnss_pr_err("Failed to create event workqueue!\n");
4749 return -EFAULT;
4750 }
4751
4752 INIT_WORK(&plat_priv->event_work, cnss_driver_event_work);
4753 INIT_LIST_HEAD(&plat_priv->event_list);
4754
4755 return 0;
4756 }
4757
4758 static void cnss_event_work_deinit(struct cnss_plat_data *plat_priv)
4759 {
4760 destroy_workqueue(plat_priv->event_wq);
4761 }
4762
4763 static int cnss_reboot_notifier(struct notifier_block *nb,
4764 unsigned long action,
4765 void *data)
4766 {
4767 struct cnss_plat_data *plat_priv =
4768 container_of(nb, struct cnss_plat_data, reboot_nb);
4769
4770 set_bit(CNSS_IN_REBOOT, &plat_priv->driver_state);
4771 cnss_bus_update_status(plat_priv, CNSS_SYS_REBOOT);
4772 del_timer(&plat_priv->fw_boot_timer);
4773 complete_all(&plat_priv->power_up_complete);
4774 complete_all(&plat_priv->cal_complete);
4775 cnss_pr_dbg("Reboot is in progress with action %d\n", action);
4776
4777 return NOTIFY_DONE;
4778 }
4779
4780 #ifdef CONFIG_CNSS_HW_SECURE_DISABLE
4781 #ifdef CONFIG_CNSS_HW_SECURE_SMEM
4782 int cnss_wlan_hw_disable_check(struct cnss_plat_data *plat_priv)
4783 {
4784 uint32_t *peripheralStateInfo = NULL;
4785 size_t size = 0;
4786
4787 /* Once this flag is set, secure peripheral feature
4788 * will not be supported till next reboot
4789 */
4790 if (plat_priv->sec_peri_feature_disable)
4791 return 0;
4792
4793 peripheralStateInfo = qcom_smem_get(QCOM_SMEM_HOST_ANY, PERISEC_SMEM_ID, &size);
4794 if (IS_ERR_OR_NULL(peripheralStateInfo)) {
4795 if (PTR_ERR(peripheralStateInfo) != -ENOENT &&
4796 PTR_ERR(peripheralStateInfo) != -ENODEV)
4797 CNSS_ASSERT(0);
4798
4799 cnss_pr_dbg("Secure HW feature not enabled. ret = %d\n",
4800 PTR_ERR(peripheralStateInfo));
4801 plat_priv->sec_peri_feature_disable = true;
4802 return 0;
4803 }
4804
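/* WLAN's bit index in the SMEM peripheral state word is derived from its
 * UID relative to the 0x500 base (per the check below); a set bit means
 * the hardware has been disabled by the secure environment.
 */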
4805 cnss_pr_dbg("Secure HW state: %d\n", *peripheralStateInfo);
4806 if ((*peripheralStateInfo >> (HW_WIFI_UID - 0x500)) & 0x1)
4807 set_bit(CNSS_WLAN_HW_DISABLED,
4808 &plat_priv->driver_state);
4809 else
4810 clear_bit(CNSS_WLAN_HW_DISABLED,
4811 &plat_priv->driver_state);
4812
4813 return 0;
4814 }
4815 #else
4816 int cnss_wlan_hw_disable_check(struct cnss_plat_data *plat_priv)
4817 {
4818 struct Object client_env;
4819 struct Object app_object;
4820 u32 wifi_uid = HW_WIFI_UID;
4821 union ObjectArg obj_arg[2] = {{{0, 0}}};
4822 int ret;
4823 u8 state = 0;
4824
4825 /* Once this flag is set, secure peripheral feature
4826 * will not be supported till next reboot
4827 */
4828 if (plat_priv->sec_peri_feature_disable)
4829 return 0;
4830
4831 /* get rootObj */
4832 ret = get_client_env_object(&client_env);
4833 if (ret) {
4834 cnss_pr_dbg("Failed to get client_env_object, ret: %d\n", ret);
4835 goto end;
4836 }
4837 ret = IClientEnv_open(client_env, HW_STATE_UID, &app_object);
4838 if (ret) {
4839 cnss_pr_dbg("Failed to get app_object, ret: %d\n", ret);
4840 if (ret == FEATURE_NOT_SUPPORTED) {
4841 ret = 0; /* Do not Assert */
4842 plat_priv->sec_peri_feature_disable = true;
4843 cnss_pr_dbg("Secure HW feature not supported\n");
4844 }
4845 goto exit_release_clientenv;
4846 }
4847
4848 obj_arg[0].b = (struct ObjectBuf) {&wifi_uid, sizeof(u32)};
4849 obj_arg[1].b = (struct ObjectBuf) {&state, sizeof(u8)};
4850 ret = Object_invoke(app_object, HW_OP_GET_STATE, obj_arg,
4851 ObjectCounts_pack(1, 1, 0, 0));
4852
4853 cnss_pr_dbg("SMC invoke ret: %d state: %d\n", ret, state);
4854 if (ret) {
4855 if (ret == PERIPHERAL_NOT_FOUND) {
4856 ret = 0; /* Do not Assert */
4857 plat_priv->sec_peri_feature_disable = true;
4858 cnss_pr_dbg("Secure HW mode is not updated. Peripheral not found\n");
4859 }
4860 goto exit_release_app_obj;
4861 }
4862
4863 if (state == 1)
4864 set_bit(CNSS_WLAN_HW_DISABLED,
4865 &plat_priv->driver_state);
4866 else
4867 clear_bit(CNSS_WLAN_HW_DISABLED,
4868 &plat_priv->driver_state);
4869
4870 exit_release_app_obj:
4871 Object_release(app_object);
4872 exit_release_clientenv:
4873 Object_release(client_env);
4874 end:
4875 if (ret) {
4876 cnss_pr_err("Unable to get HW disable status\n");
4877 CNSS_ASSERT(0);
4878 }
4879 return ret;
4880 }
4881 #endif
4882 #else
4883 int cnss_wlan_hw_disable_check(struct cnss_plat_data *plat_priv)
4884 {
4885 return 0;
4886 }
4887 #endif
4888
4889 #ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
4890 static void cnss_sram_dump_init(struct cnss_plat_data *plat_priv)
4891 {
4892 }
4893 #else
4894 static void cnss_sram_dump_init(struct cnss_plat_data *plat_priv)
4895 {
4896 if (plat_priv->device_id == QCA6490_DEVICE_ID &&
4897 cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
4898 plat_priv->sram_dump = kcalloc(SRAM_DUMP_SIZE, 1, GFP_KERNEL);
4899 }
4900 #endif
4901
4902 #if IS_ENABLED(CONFIG_WCNSS_MEM_PRE_ALLOC)
4903 static void cnss_initialize_mem_pool(unsigned long device_id)
4904 {
4905 cnss_initialize_prealloc_pool(device_id);
4906 }
4907 static void cnss_deinitialize_mem_pool(void)
4908 {
4909 cnss_deinitialize_prealloc_pool();
4910 }
4911 #else
4912 static void cnss_initialize_mem_pool(unsigned long device_id)
4913 {
4914 }
4915 static void cnss_deinitialize_mem_pool(void)
4916 {
4917 }
4918 #endif
4919
4920 static int cnss_misc_init(struct cnss_plat_data *plat_priv)
4921 {
4922 int ret;
4923
4924 ret = cnss_init_sol_gpio(plat_priv);
4925 if (ret)
4926 return ret;
4927
4928 timer_setup(&plat_priv->fw_boot_timer,
4929 cnss_bus_fw_boot_timeout_hdlr, 0);
4930
4931 ret = device_init_wakeup(&plat_priv->plat_dev->dev, true);
4932 if (ret)
4933 cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
4934 ret);
4935
4936 INIT_WORK(&plat_priv->recovery_work, cnss_recovery_work_handler);
4937 init_completion(&plat_priv->power_up_complete);
4938 init_completion(&plat_priv->cal_complete);
4939 init_completion(&plat_priv->rddm_complete);
4940 init_completion(&plat_priv->recovery_complete);
4941 init_completion(&plat_priv->daemon_connected);
4942 mutex_init(&plat_priv->dev_lock);
4943 mutex_init(&plat_priv->driver_ops_lock);
4944
4945 plat_priv->reboot_nb.notifier_call = cnss_reboot_notifier;
4946 ret = register_reboot_notifier(&plat_priv->reboot_nb);
4947 if (ret)
4948 cnss_pr_err("Failed to register reboot notifier, err = %d\n",
4949 ret);
4950
4951 plat_priv->recovery_ws =
4952 wakeup_source_register(&plat_priv->plat_dev->dev,
4953 "CNSS_FW_RECOVERY");
4954 if (!plat_priv->recovery_ws)
4955 cnss_pr_err("Failed to setup FW recovery wake source\n");
4956
4957 ret = cnss_plat_ipc_register(CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01,
4958 cnss_daemon_connection_update_cb,
4959 plat_priv);
4960 if (ret)
4961 cnss_pr_err("QMI IPC connection call back register failed, err = %d\n",
4962 ret);
4963
4964 cnss_sram_dump_init(plat_priv);
4965
4966 if (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
4967 "qcom,rc-ep-short-channel"))
4968 cnss_set_feature_list(plat_priv, CNSS_RC_EP_ULTRASHORT_CHANNEL_V01);
4969 if (plat_priv->device_id == PEACH_DEVICE_ID)
4970 cnss_set_feature_list(plat_priv, CNSS_AUX_UC_SUPPORT_V01);
4971
4972 return 0;
4973 }
4974
4975 #ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
4976 static void cnss_sram_dump_deinit(struct cnss_plat_data *plat_priv)
4977 {
4978 }
4979 #else
4980 static void cnss_sram_dump_deinit(struct cnss_plat_data *plat_priv)
4981 {
4982 if (plat_priv->device_id == QCA6490_DEVICE_ID &&
4983 cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
4984 kfree(plat_priv->sram_dump);
4985 }
4986 #endif
4987
4988 static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
4989 {
4990 cnss_plat_ipc_unregister(CNSS_PLAT_IPC_DAEMON_QMI_CLIENT_V01,
4991 plat_priv);
4992 complete_all(&plat_priv->recovery_complete);
4993 complete_all(&plat_priv->rddm_complete);
4994 complete_all(&plat_priv->cal_complete);
4995 complete_all(&plat_priv->power_up_complete);
4996 complete_all(&plat_priv->daemon_connected);
4997 device_init_wakeup(&plat_priv->plat_dev->dev, false);
4998 unregister_reboot_notifier(&plat_priv->reboot_nb);
4999 del_timer(&plat_priv->fw_boot_timer);
5000 wakeup_source_unregister(plat_priv->recovery_ws);
5001 cnss_deinit_sol_gpio(plat_priv);
5002 cnss_sram_dump_deinit(plat_priv);
5003 kfree(plat_priv->on_chip_pmic_board_ids);
5004 }
5005
5006 static void cnss_init_time_sync_period_default(struct cnss_plat_data *plat_priv)
5007 {
5008 plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_WLAN] =
5009 CNSS_TIME_SYNC_PERIOD_INVALID;
5010 plat_priv->ctrl_params.time_sync_period_vote[TIME_SYNC_VOTE_CNSS] =
5011 CNSS_TIME_SYNC_PERIOD_DEFAULT;
5012 }
5013
5014 static void cnss_init_control_params(struct cnss_plat_data *plat_priv)
5015 {
5016 plat_priv->ctrl_params.quirks = CNSS_QUIRKS_DEFAULT;
5017
5018 plat_priv->cbc_enabled = !IS_ENABLED(CONFIG_CNSS_EMULATION) &&
5019 of_property_read_bool(plat_priv->plat_dev->dev.of_node,
5020 "qcom,wlan-cbc-enabled");
5021
5022 plat_priv->ctrl_params.mhi_timeout = CNSS_MHI_TIMEOUT_DEFAULT;
5023 plat_priv->ctrl_params.mhi_m2_timeout = CNSS_MHI_M2_TIMEOUT_DEFAULT;
5024 plat_priv->ctrl_params.qmi_timeout = CNSS_QMI_TIMEOUT_DEFAULT;
5025 plat_priv->ctrl_params.bdf_type = CNSS_BDF_TYPE_DEFAULT;
5026 plat_priv->ctrl_params.time_sync_period = CNSS_TIME_SYNC_PERIOD_DEFAULT;
5027 cnss_init_time_sync_period_default(plat_priv);
5028 /* ADSP power collapse (PC) is enabled by default, so initialize
5029 * adsp_pc_enabled to true.
5030 */
5031 plat_priv->adsp_pc_enabled = true;
5032 }
5033
5034 static void cnss_get_pm_domain_info(struct cnss_plat_data *plat_priv)
5035 {
5036 struct device *dev = &plat_priv->plat_dev->dev;
5037
5038 plat_priv->use_pm_domain =
5039 of_property_read_bool(dev->of_node, "use-pm-domain");
5040
5041 cnss_pr_dbg("use-pm-domain is %d\n", plat_priv->use_pm_domain);
5042 }
5043
5044 static void cnss_get_wlaon_pwr_ctrl_info(struct cnss_plat_data *plat_priv)
5045 {
5046 struct device *dev = &plat_priv->plat_dev->dev;
5047
5048 plat_priv->set_wlaon_pwr_ctrl =
5049 of_property_read_bool(dev->of_node, "qcom,set-wlaon-pwr-ctrl");
5050
5051 cnss_pr_dbg("set_wlaon_pwr_ctrl is %d\n",
5052 plat_priv->set_wlaon_pwr_ctrl);
5053 }
5054
5055 static bool cnss_use_fw_path_with_prefix(struct cnss_plat_data *plat_priv)
5056 {
5057 return (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
5058 "qcom,converged-dt") ||
5059 of_property_read_bool(plat_priv->plat_dev->dev.of_node,
5060 "qcom,same-dt-multi-dev") ||
5061 of_property_read_bool(plat_priv->plat_dev->dev.of_node,
5062 "qcom,multi-wlan-exchg"));
5063 }
5064
5065 static const struct platform_device_id cnss_platform_id_table[] = {
5066 { .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
5067 { .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
5068 { .name = "qca6390", .driver_data = QCA6390_DEVICE_ID, },
5069 { .name = "qca6490", .driver_data = QCA6490_DEVICE_ID, },
5070 { .name = "kiwi", .driver_data = KIWI_DEVICE_ID, },
5071 { .name = "mango", .driver_data = MANGO_DEVICE_ID, },
5072 { .name = "peach", .driver_data = PEACH_DEVICE_ID, },
5073 { .name = "qcaconv", .driver_data = 0, },
5074 { },
5075 };
5076
5077 static const struct of_device_id cnss_of_match_table[] = {
5078 {
5079 .compatible = "qcom,cnss",
5080 .data = (void *)&cnss_platform_id_table[0]},
5081 {
5082 .compatible = "qcom,cnss-qca6290",
5083 .data = (void *)&cnss_platform_id_table[1]},
5084 {
5085 .compatible = "qcom,cnss-qca6390",
5086 .data = (void *)&cnss_platform_id_table[2]},
5087 {
5088 .compatible = "qcom,cnss-qca6490",
5089 .data = (void *)&cnss_platform_id_table[3]},
5090 {
5091 .compatible = "qcom,cnss-kiwi",
5092 .data = (void *)&cnss_platform_id_table[4]},
5093 {
5094 .compatible = "qcom,cnss-mango",
5095 .data = (void *)&cnss_platform_id_table[5]},
5096 {
5097 .compatible = "qcom,cnss-peach",
5098 .data = (void *)&cnss_platform_id_table[6]},
5099 {
5100 .compatible = "qcom,cnss-qca-converged",
5101 .data = (void *)&cnss_platform_id_table[7]},
5102 { },
5103 };
5104 MODULE_DEVICE_TABLE(of, cnss_of_match_table);
5105
5106 static inline bool
5107 cnss_use_nv_mac(struct cnss_plat_data *plat_priv)
5108 {
5109 return of_property_read_bool(plat_priv->plat_dev->dev.of_node,
5110 "use-nv-mac");
5111 }
5112
5113 static int cnss_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
5114 {
5115 struct device_node *child;
5116 u32 id, i;
5117 int id_n, device_identifier_gpio, ret;
5118 u8 gpio_value;
5119
5120
5121 if (plat_priv->dt_type != CNSS_DTT_CONVERGED)
5122 return 0;
5123
5124 /* Parse the wlan_sw_ctrl GPIO, which is used to identify the device */
5125 ret = cnss_get_wlan_sw_ctrl(plat_priv);
5126 if (ret) {
5127 cnss_pr_dbg("Failed to parse wlan_sw_ctrl gpio, error:%d", ret);
5128 return ret;
5129 }
5130
5131 device_identifier_gpio = plat_priv->pinctrl_info.wlan_sw_ctrl_gpio;
5132
5133 gpio_value = gpio_get_value(device_identifier_gpio);
5134 cnss_pr_dbg("Value of Device Identifier GPIO: %d\n", gpio_value);
5135
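/* Walk the "chip_cfg" child nodes: the wlan_sw_ctrl GPIO level selects
 * the QCA6490 entry (GPIO high) or the KIWI entry (GPIO low) whose
 * "supported-ids" list contains the matching device ID.
 */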
5136 for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
5137 child) {
5138 if (strcmp(child->name, "chip_cfg"))
5139 continue;
5140
5141 id_n = of_property_count_u32_elems(child, "supported-ids");
5142 if (id_n <= 0) {
5143 cnss_pr_err("Device id is NOT set\n");
5144 return -EINVAL;
5145 }
5146
5147 for (i = 0; i < id_n; i++) {
5148 ret = of_property_read_u32_index(child,
5149 "supported-ids",
5150 i, &id);
5151 if (ret) {
5152 cnss_pr_err("Failed to read supported ids\n");
5153 return -EINVAL;
5154 }
5155
5156 if (gpio_value && id == QCA6490_DEVICE_ID) {
5157 plat_priv->plat_dev->dev.of_node = child;
5158 plat_priv->device_id = QCA6490_DEVICE_ID;
5159 cnss_utils_update_device_type(CNSS_HSP_DEVICE_TYPE);
5160 cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
5161 child->name, i, id);
5162 return 0;
5163 } else if (!gpio_value && id == KIWI_DEVICE_ID) {
5164 plat_priv->plat_dev->dev.of_node = child;
5165 plat_priv->device_id = KIWI_DEVICE_ID;
5166 cnss_utils_update_device_type(CNSS_HMT_DEVICE_TYPE);
5167 cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
5168 child->name, i, id);
5169 return 0;
5170 }
5171 }
5172 }
5173
5174 return -EINVAL;
5175 }
5176
5177 static inline u32
5178 cnss_dt_type(struct cnss_plat_data *plat_priv)
5179 {
5180 bool is_converged_dt = of_property_read_bool(
5181 plat_priv->plat_dev->dev.of_node, "qcom,converged-dt");
5182 bool is_multi_wlan_xchg;
5183
5184 if (is_converged_dt)
5185 return CNSS_DTT_CONVERGED;
5186
5187 is_multi_wlan_xchg = of_property_read_bool(
5188 plat_priv->plat_dev->dev.of_node, "qcom,multi-wlan-exchg");
5189
5190 if (is_multi_wlan_xchg)
5191 return CNSS_DTT_MULTIEXCHG;
5192 return CNSS_DTT_LEGACY;
5193 }
5194
5195 static int cnss_wlan_device_init(struct cnss_plat_data *plat_priv)
5196 {
5197 int ret = 0;
5198 int retry = 0;
5199
5200 if (test_bit(SKIP_DEVICE_BOOT, &plat_priv->ctrl_params.quirks))
5201 return 0;
5202
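/* Retry bus init with a linearly increasing delay; -EPROBE_DEFER is not
 * retried here, presumably because deferral is handled by the caller or
 * by the driver core re-probing.
 */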
5203 retry:
5204 ret = cnss_power_on_device(plat_priv, true);
5205 if (ret)
5206 goto end;
5207
5208 ret = cnss_bus_init(plat_priv);
5209 if (ret) {
5210 if ((ret != -EPROBE_DEFER) &&
5211 retry++ < POWER_ON_RETRY_MAX_TIMES) {
5212 cnss_power_off_device(plat_priv);
5213 cnss_pr_dbg("Retry cnss_bus_init #%d\n", retry);
5214 msleep(POWER_ON_RETRY_DELAY_MS * retry);
5215 goto retry;
5216 }
5217 goto power_off;
5218 }
5219 return 0;
5220
5221 power_off:
5222 cnss_power_off_device(plat_priv);
5223 end:
5224 return ret;
5225 }
5226
5227 int cnss_wlan_hw_enable(void)
5228 {
5229 struct cnss_plat_data *plat_priv;
5230 int ret = 0;
5231
5232 if (cnss_is_dual_wlan_enabled())
5233 plat_priv = cnss_get_first_plat_priv(NULL);
5234 else
5235 plat_priv = cnss_get_plat_priv(NULL);
5236
5237 if (!plat_priv)
5238 return -ENODEV;
5239
5240 clear_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state);
5241
5242 if (test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state))
5243 goto register_driver;
5244 ret = cnss_wlan_device_init(plat_priv);
5245 if (ret) {
5246 if (!test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
5247 CNSS_ASSERT(0);
5248 return ret;
5249 }
5250
5251 if (test_bit(CNSS_FS_READY, &plat_priv->driver_state))
5252 cnss_driver_event_post(plat_priv,
5253 CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
5254 0, NULL);
5255
5256 register_driver:
5257 if (plat_priv->driver_ops)
5258 ret = cnss_wlan_register_driver(plat_priv->driver_ops);
5259
5260 return ret;
5261 }
5262 EXPORT_SYMBOL(cnss_wlan_hw_enable);
5263
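/*
 * Forward the host WFC (Wi-Fi calling) call status to firmware over QMI.
 * If the modem IMS server is already connected, the request is ignored
 * and success is returned.
 */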
int cnss_set_wfc_mode(struct device *dev, struct cnss_wfc_cfg cfg)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
	int ret = 0;

	if (!plat_priv)
		return -ENODEV;

	/* If IMS server is connected, return success without QMI send */
	if (test_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state)) {
		cnss_pr_dbg("Ignore host request as IMS server is connected");
		return ret;
	}

	ret = cnss_wlfw_send_host_wfc_call_status(plat_priv, cfg);

	return ret;
}
EXPORT_SYMBOL(cnss_set_wfc_mode);

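/*
 * Thermal cooling device callbacks: report the maximum and current
 * throttle states tracked in struct cnss_thermal_cdev, and push a new
 * state to the WLAN device over the bus when requested.
 */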
static int cnss_tcdev_get_max_state(struct thermal_cooling_device *tcdev,
				    unsigned long *thermal_state)
{
	struct cnss_thermal_cdev *cnss_tcdev = NULL;

	if (!tcdev || !tcdev->devdata) {
		cnss_pr_err("tcdev or tcdev->devdata is null!\n");
		return -EINVAL;
	}

	cnss_tcdev = tcdev->devdata;
	*thermal_state = cnss_tcdev->max_thermal_state;

	return 0;
}

static int cnss_tcdev_get_cur_state(struct thermal_cooling_device *tcdev,
				    unsigned long *thermal_state)
{
	struct cnss_thermal_cdev *cnss_tcdev = NULL;

	if (!tcdev || !tcdev->devdata) {
		cnss_pr_err("tcdev or tcdev->devdata is null!\n");
		return -EINVAL;
	}

	cnss_tcdev = tcdev->devdata;
	*thermal_state = cnss_tcdev->curr_thermal_state;

	return 0;
}

static int cnss_tcdev_set_cur_state(struct thermal_cooling_device *tcdev,
				    unsigned long thermal_state)
{
	struct cnss_thermal_cdev *cnss_tcdev = NULL;
	struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
	int ret = 0;

	if (!tcdev || !tcdev->devdata) {
		cnss_pr_err("tcdev or tcdev->devdata is null!\n");
		return -EINVAL;
	}

	cnss_tcdev = tcdev->devdata;

	if (thermal_state > cnss_tcdev->max_thermal_state)
		return -EINVAL;

	cnss_pr_vdbg("Cooling device set current state: %ld, for cdev id %d",
		     thermal_state, cnss_tcdev->tcdev_id);

	mutex_lock(&plat_priv->tcdev_lock);
	ret = cnss_bus_set_therm_cdev_state(plat_priv,
					    thermal_state,
					    cnss_tcdev->tcdev_id);
	if (!ret)
		cnss_tcdev->curr_thermal_state = thermal_state;
	mutex_unlock(&plat_priv->tcdev_lock);
	if (ret) {
		cnss_pr_err("Setting Current Thermal State Failed: %d, for cdev id %d",
			    ret, cnss_tcdev->tcdev_id);
		return ret;
	}

	return 0;
}

static struct thermal_cooling_device_ops cnss_cooling_ops = {
	.get_max_state = cnss_tcdev_get_max_state,
	.get_cur_state = cnss_tcdev_get_cur_state,
	.set_cur_state = cnss_tcdev_set_cur_state,
};

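/*
 * Register a CNSS thermal cooling device: look up the matching
 * "qcom,cnss_cdev<id>" device tree node, register it with the thermal
 * framework if it provides "#cooling-cells", and track it on the
 * platform's cooling device list.
 */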
int cnss_thermal_cdev_register(struct device *dev, unsigned long max_state,
			       int tcdev_id)
{
	struct cnss_plat_data *priv = cnss_get_plat_priv(NULL);
	struct cnss_thermal_cdev *cnss_tcdev = NULL;
	char cdev_node_name[THERMAL_NAME_LENGTH] = "";
	struct device_node *dev_node;
	int ret = 0;

	if (!priv) {
		cnss_pr_err("Platform driver is not initialized!\n");
		return -ENODEV;
	}

	cnss_tcdev = kzalloc(sizeof(*cnss_tcdev), GFP_KERNEL);
	if (!cnss_tcdev) {
		cnss_pr_err("Failed to allocate cnss_tcdev object!\n");
		return -ENOMEM;
	}

	cnss_tcdev->tcdev_id = tcdev_id;
	cnss_tcdev->max_thermal_state = max_state;

	snprintf(cdev_node_name, THERMAL_NAME_LENGTH,
		 "qcom,cnss_cdev%d", tcdev_id);

	dev_node = of_find_node_by_name(NULL, cdev_node_name);
	if (!dev_node) {
		cnss_pr_err("Failed to get cooling device node\n");
		kfree(cnss_tcdev);
		return -EINVAL;
	}

	cnss_pr_dbg("tcdev node->name=%s\n", dev_node->name);

	if (of_find_property(dev_node, "#cooling-cells", NULL)) {
		cnss_tcdev->tcdev = thermal_of_cooling_device_register(dev_node,
								       cdev_node_name,
								       cnss_tcdev,
								       &cnss_cooling_ops);
		if (IS_ERR_OR_NULL(cnss_tcdev->tcdev)) {
			ret = PTR_ERR(cnss_tcdev->tcdev);
			cnss_pr_err("Cooling device register failed: %d, for cdev id %d\n",
				    ret, cnss_tcdev->tcdev_id);
			kfree(cnss_tcdev);
		} else {
			cnss_pr_dbg("Cooling device registered for cdev id %d",
				    cnss_tcdev->tcdev_id);
			mutex_lock(&priv->tcdev_lock);
			list_add(&cnss_tcdev->tcdev_list,
				 &priv->cnss_tcdev_list);
			mutex_unlock(&priv->tcdev_lock);
		}
	} else {
		cnss_pr_dbg("Cooling device registration not supported");
		kfree(cnss_tcdev);
		ret = -EOPNOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL(cnss_thermal_cdev_register);

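/*
 * Unregister and free every cooling device on the platform's list. The
 * tcdev_id argument is accepted for API symmetry but all registered
 * cooling devices are torn down.
 */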
void cnss_thermal_cdev_unregister(struct device *dev, int tcdev_id)
{
	struct cnss_plat_data *priv = cnss_get_plat_priv(NULL);
	struct cnss_thermal_cdev *cnss_tcdev = NULL;

	if (!priv) {
		cnss_pr_err("Platform driver is not initialized!\n");
		return;
	}

	mutex_lock(&priv->tcdev_lock);
	while (!list_empty(&priv->cnss_tcdev_list)) {
		cnss_tcdev = list_first_entry(&priv->cnss_tcdev_list,
					      struct cnss_thermal_cdev,
					      tcdev_list);
		thermal_cooling_device_unregister(cnss_tcdev->tcdev);
		list_del(&cnss_tcdev->tcdev_list);
		kfree(cnss_tcdev);
	}
	mutex_unlock(&priv->tcdev_lock);
}
EXPORT_SYMBOL(cnss_thermal_cdev_unregister);

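/*
 * Look up the cooling device matching tcdev_id and return its current
 * throttle state, or -EINVAL if no such cooling device is registered.
 */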
int cnss_get_curr_therm_cdev_state(struct device *dev,
				   unsigned long *thermal_state,
				   int tcdev_id)
{
	struct cnss_plat_data *priv = cnss_get_plat_priv(NULL);
	struct cnss_thermal_cdev *cnss_tcdev = NULL;

	if (!priv) {
		cnss_pr_err("Platform driver is not initialized!\n");
		return -ENODEV;
	}

	mutex_lock(&priv->tcdev_lock);
	list_for_each_entry(cnss_tcdev, &priv->cnss_tcdev_list, tcdev_list) {
		if (cnss_tcdev->tcdev_id != tcdev_id)
			continue;

		*thermal_state = cnss_tcdev->curr_thermal_state;
		mutex_unlock(&priv->tcdev_lock);
		cnss_pr_dbg("Cooling device current state: %ld, for cdev id %d",
			    cnss_tcdev->curr_thermal_state, tcdev_id);
		return 0;
	}
	mutex_unlock(&priv->tcdev_lock);
	cnss_pr_dbg("Cooling device ID not found: %d", tcdev_id);
	return -EINVAL;
}
EXPORT_SYMBOL(cnss_get_curr_therm_cdev_state);

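/*
 * Platform driver probe: parse the device tree, set up power, bus
 * scaling, sysfs, debugfs, QMI/DMS and event handling, and bring up the
 * WLAN device unless the hardware is marked disabled. Each failure path
 * unwinds the steps completed so far.
 */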
static int cnss_probe(struct platform_device *plat_dev)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	const struct of_device_id *of_id;
	const struct platform_device_id *device_id;

	if (cnss_get_plat_priv(plat_dev)) {
		cnss_pr_err("Driver is already initialized!\n");
		ret = -EEXIST;
		goto out;
	}

	ret = cnss_plat_env_available();
	if (ret)
		goto out;

	of_id = of_match_device(cnss_of_match_table, &plat_dev->dev);
	if (!of_id || !of_id->data) {
		cnss_pr_err("Failed to find of match device!\n");
		ret = -ENODEV;
		goto out;
	}

	device_id = of_id->data;

	plat_priv = devm_kzalloc(&plat_dev->dev, sizeof(*plat_priv),
				 GFP_KERNEL);
	if (!plat_priv) {
		ret = -ENOMEM;
		goto out;
	}

	plat_priv->plat_dev = plat_dev;
	plat_priv->dev_node = NULL;
	plat_priv->device_id = device_id->driver_data;
	plat_priv->dt_type = cnss_dt_type(plat_priv);
	cnss_pr_dbg("Probing platform driver from dt type: %d\n",
		    plat_priv->dt_type);

	plat_priv->use_fw_path_with_prefix =
		cnss_use_fw_path_with_prefix(plat_priv);

	ret = cnss_get_dev_cfg_node(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
		goto reset_plat_dev;
	}

	cnss_initialize_mem_pool(plat_priv->device_id);

	ret = cnss_get_pld_bus_ops_name(plat_priv);
	if (ret)
		cnss_pr_vdbg("Failed to find bus ops name, err = %d\n",
			     ret);

	ret = cnss_get_rc_num(plat_priv);
	if (ret)
		cnss_pr_err("Failed to find PCIe RC number, err = %d\n", ret);

	cnss_pr_dbg("rc_num=%d\n", plat_priv->rc_num);

	plat_priv->bus_type = cnss_get_bus_type(plat_priv);
	plat_priv->use_nv_mac = cnss_use_nv_mac(plat_priv);
	cnss_set_plat_priv(plat_dev, plat_priv);
	cnss_set_device_name(plat_priv);
	platform_set_drvdata(plat_dev, plat_priv);
	INIT_LIST_HEAD(&plat_priv->vreg_list);
	INIT_LIST_HEAD(&plat_priv->clk_list);

	cnss_get_pm_domain_info(plat_priv);
	cnss_get_wlaon_pwr_ctrl_info(plat_priv);
	cnss_power_misc_params_init(plat_priv);
	cnss_get_tcs_info(plat_priv);
	cnss_get_cpr_info(plat_priv);
	cnss_aop_interface_init(plat_priv);
	cnss_init_control_params(plat_priv);

	ret = cnss_get_resources(plat_priv);
	if (ret)
		goto reset_ctx;

	ret = cnss_register_esoc(plat_priv);
	if (ret)
		goto free_res;

	ret = cnss_register_bus_scale(plat_priv);
	if (ret)
		goto unreg_esoc;

	ret = cnss_create_sysfs(plat_priv);
	if (ret)
		goto unreg_bus_scale;

	ret = cnss_event_work_init(plat_priv);
	if (ret)
		goto remove_sysfs;

	ret = cnss_dms_init(plat_priv);
	if (ret)
		goto deinit_event_work;

	ret = cnss_debugfs_create(plat_priv);
	if (ret)
		goto deinit_dms;

	ret = cnss_misc_init(plat_priv);
	if (ret)
		goto destroy_debugfs;

	ret = cnss_wlan_hw_disable_check(plat_priv);
	if (ret)
		goto deinit_misc;

	/* Make sure all platform-related init is done before
	 * device power on and bus init.
	 */
	if (!test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
		ret = cnss_wlan_device_init(plat_priv);
		if (ret)
			goto deinit_misc;
	} else {
		cnss_pr_info("WLAN HW Disabled. Defer PCI enumeration\n");
	}

	cnss_register_coex_service(plat_priv);
	cnss_register_ims_service(plat_priv);

	mutex_init(&plat_priv->tcdev_lock);
	INIT_LIST_HEAD(&plat_priv->cnss_tcdev_list);

	cnss_pr_info("Platform driver probed successfully.\n");

	return 0;

deinit_misc:
	cnss_misc_deinit(plat_priv);
destroy_debugfs:
	cnss_debugfs_destroy(plat_priv);
deinit_dms:
	cnss_dms_deinit(plat_priv);
deinit_event_work:
	cnss_event_work_deinit(plat_priv);
remove_sysfs:
	cnss_remove_sysfs(plat_priv);
unreg_bus_scale:
	cnss_unregister_bus_scale(plat_priv);
unreg_esoc:
	cnss_unregister_esoc(plat_priv);
free_res:
	cnss_put_resources(plat_priv);
reset_ctx:
	cnss_aop_interface_deinit(plat_priv);
	platform_set_drvdata(plat_dev, NULL);
	cnss_deinitialize_mem_pool();
reset_plat_dev:
	cnss_clear_plat_priv(plat_priv);
out:
	return ret;
}

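/*
 * Platform driver remove: tear down services, bus, debugfs, QMI/DMS,
 * sysfs and resources in reverse order of probe, then release the
 * platform data.
 */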
static int cnss_remove(struct platform_device *plat_dev)
{
	struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);

	plat_priv->audio_iommu_domain = NULL;
	cnss_genl_exit();
	cnss_unregister_ims_service(plat_priv);
	cnss_unregister_coex_service(plat_priv);
	cnss_bus_deinit(plat_priv);
	cnss_misc_deinit(plat_priv);
	cnss_debugfs_destroy(plat_priv);
	cnss_dms_deinit(plat_priv);
	cnss_qmi_deinit(plat_priv);
	cnss_event_work_deinit(plat_priv);
	cnss_cancel_dms_work();
	cnss_remove_sysfs(plat_priv);
	cnss_unregister_bus_scale(plat_priv);
	cnss_unregister_esoc(plat_priv);
	cnss_put_resources(plat_priv);
	cnss_aop_interface_deinit(plat_priv);
	cnss_deinitialize_mem_pool();
	platform_set_drvdata(plat_dev, NULL);
	cnss_clear_plat_priv(plat_priv);

	return 0;
}

static struct platform_driver cnss_platform_driver = {
	.probe  = cnss_probe,
	.remove = cnss_remove,
	.driver = {
		.name = "cnss2",
		.of_match_table = cnss_of_match_table,
#ifdef CONFIG_CNSS_ASYNC
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
#endif
	},
};

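/*
 * Check for an enabled device tree node matching the driver and, if one
 * is found, latch cnss_allow_driver_loading so module init proceeds.
 */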
static bool cnss_check_compatible_node(void)
{
	struct device_node *dn = NULL;

	for_each_matching_node(dn, cnss_of_match_table) {
		if (of_device_is_available(dn)) {
			cnss_allow_driver_loading = true;
			return true;
		}
	}

	return false;
}

/**
 * cnss_is_valid_dt_node_found - Check if a valid device tree node is present
 *
 * A valid device tree node is one whose "compatible" property matches the
 * device match table and whose "status" property is not disabled.
 *
 * Return: true if a valid device tree node is found, false otherwise
 */
static bool cnss_is_valid_dt_node_found(void)
{
	struct device_node *dn = NULL;

	for_each_matching_node(dn, cnss_of_match_table) {
		if (of_device_is_available(dn))
			break;
	}

	if (dn)
		return true;

	return false;
}

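/*
 * Module init: bail out early when no usable device tree node exists,
 * otherwise set up debug infrastructure, register the platform driver
 * and the generic netlink family, and initialize the platform
 * environment count.
 */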
static int __init cnss_initialize(void)
{
	int ret = 0;

	if (!cnss_is_valid_dt_node_found())
		return -ENODEV;

	if (!cnss_check_compatible_node())
		return ret;

	cnss_debug_init();
	ret = platform_driver_register(&cnss_platform_driver);
	if (ret)
		cnss_debug_deinit();

	ret = cnss_genl_init();
	if (ret < 0)
		cnss_pr_err("CNSS genl init failed %d\n", ret);

	cnss_init_plat_env_count();
	return ret;
}

static void __exit cnss_exit(void)
{
	cnss_genl_exit();
	platform_driver_unregister(&cnss_platform_driver);
	cnss_debug_deinit();
}

module_init(cnss_initialize);
module_exit(cnss_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CNSS2 Platform Driver");