1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6
7 #include <linux/module.h>
8 #include <linux/soc/qcom/qmi.h>
9
10 #include "bus.h"
11 #include "debug.h"
12 #include "main.h"
13 #include "qmi.h"
14 #include "genl.h"
15
16 #define WLFW_SERVICE_INS_ID_V01 1
17 #define WLFW_CLIENT_ID 0x4b4e454c
18 #define BDF_FILE_NAME_PREFIX "bdwlan"
19 #define ELF_BDF_FILE_NAME "bdwlan.elf"
20 #define ELF_BDF_FILE_NAME_GF "bdwlang.elf"
21 #define ELF_BDF_FILE_NAME_PREFIX "bdwlan.e"
22 #define ELF_BDF_FILE_NAME_GF_PREFIX "bdwlang.e"
23 #define BIN_BDF_FILE_NAME "bdwlan.bin"
24 #define BIN_BDF_FILE_NAME_GF "bdwlang.bin"
25 #define BIN_BDF_FILE_NAME_PREFIX "bdwlan.b"
26 #define BIN_BDF_FILE_NAME_GF_PREFIX "bdwlang.b"
27 #define REGDB_FILE_NAME "regdb.bin"
28 #define HDS_FILE_NAME "hds.bin"
29 #define CHIP_ID_GF_MASK 0x10
30
31 #define QDSS_TRACE_CONFIG_FILE "qdss_trace_config"
32 /*
33 * Download QDSS config file based on build type. Add build type string to
34 * file name. Download "qdss_trace_config_debug_v<n>.cfg" for debug build
35 * and "qdss_trace_config_perf_v<n>.cfg" for perf build.
36 */
37 #ifdef CONFIG_CNSS2_DEBUG
38 #define QDSS_FILE_BUILD_STR "debug_"
39 #else
40 #define QDSS_FILE_BUILD_STR "perf_"
41 #endif
42 #define HW_V1_NUMBER "v1"
43 #define HW_V2_NUMBER "v2"
44 #define CE_MSI_NAME "CE"
45
46 #define QMI_WLFW_TIMEOUT_MS (plat_priv->ctrl_params.qmi_timeout)
47 #define QMI_WLFW_TIMEOUT_JF msecs_to_jiffies(QMI_WLFW_TIMEOUT_MS)
48 #define COEX_TIMEOUT QMI_WLFW_TIMEOUT_JF
49 #define IMS_TIMEOUT QMI_WLFW_TIMEOUT_JF
50
51 #define QMI_WLFW_MAX_RECV_BUF_SIZE SZ_8K
52 #define IMSPRIVATE_SERVICE_MAX_MSG_LEN SZ_8K
53 #define DMS_QMI_MAX_MSG_LEN SZ_256
54 #define MAX_SHADOW_REG_RESERVED 2
55 #define MAX_NUM_SHADOW_REG_V3 (QMI_WLFW_MAX_NUM_SHADOW_REG_V3_USAGE_V01 - \
56 MAX_SHADOW_REG_RESERVED)
57
58 #define QMI_WLFW_MAC_READY_TIMEOUT_MS 50
59 #define QMI_WLFW_MAC_READY_MAX_RETRY 200
60
61 // These error values are not defined in <linux/soc/qcom/qmi.h>, but firmware returns them in error responses
62 #define QMI_ERR_HARDWARE_RESTRICTED_V01 0x0053
63 #define QMI_ERR_ENOMEM_V01 0x0002
64
65 enum nm_modem_bit {
66 SLEEP_CLOCK_SELECT_INTERNAL_BIT = BIT(1),
67 HOST_CSTATE_BIT = BIT(2),
68 };
69
70 #ifdef CONFIG_CNSS2_DEBUG
71 static bool ignore_qmi_failure;
72 #define CNSS_QMI_ASSERT() CNSS_ASSERT(ignore_qmi_failure)
73 void cnss_ignore_qmi_failure(bool ignore)
74 {
75 ignore_qmi_failure = ignore;
76 }
77 #else
78 #define CNSS_QMI_ASSERT() do { } while (0)
79 void cnss_ignore_qmi_failure(bool ignore) { }
80 #endif
81
82 static char *cnss_qmi_mode_to_str(enum cnss_driver_mode mode)
83 {
84 switch (mode) {
85 case CNSS_MISSION:
86 return "MISSION";
87 case CNSS_FTM:
88 return "FTM";
89 case CNSS_EPPING:
90 return "EPPING";
91 case CNSS_WALTEST:
92 return "WALTEST";
93 case CNSS_OFF:
94 return "OFF";
95 case CNSS_CCPM:
96 return "CCPM";
97 case CNSS_QVIT:
98 return "QVIT";
99 case CNSS_CALIBRATION:
100 return "CALIBRATION";
101 default:
102 return "UNKNOWN";
103 }
104 }
105
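/*
 * Generic helper: initialize a QMI transaction, send the encoded request and
 * block until the firmware responds or the timeout expires. Returns 0 on
 * success, a negative errno on transport failure, or the negated QMI result
 * when the firmware rejects the request.
 */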
106 static int qmi_send_wait(struct qmi_handle *qmi, void *req, void *rsp,
107 struct qmi_elem_info *req_ei,
108 struct qmi_elem_info *rsp_ei,
109 int req_id, size_t req_len,
110 unsigned long timeout)
111 {
112 struct qmi_txn txn;
113 int ret;
114 char *err_msg;
115 struct qmi_response_type_v01 *resp = rsp;
116
117 ret = qmi_txn_init(qmi, &txn, rsp_ei, rsp);
118 if (ret < 0) {
119 err_msg = "Qmi fail: fail to init txn,";
120 goto out;
121 }
122
123 ret = qmi_send_request(qmi, NULL, &txn, req_id,
124 req_len, req_ei, req);
125 if (ret < 0) {
126 qmi_txn_cancel(&txn);
127 err_msg = "Qmi fail: fail to send req,";
128 goto out;
129 }
130
131 ret = qmi_txn_wait(&txn, timeout);
132 if (ret < 0) {
133 err_msg = "Qmi fail: wait timeout,";
134 goto out;
135 } else if (resp->result != QMI_RESULT_SUCCESS_V01) {
136 err_msg = "Qmi fail: request rejected,";
137 cnss_pr_err("Qmi fail: respons with error:%d\n",
138 resp->error);
139 ret = -resp->result;
140 goto out;
141 }
142
143 cnss_pr_dbg("req %x success\n", req_id);
144 return 0;
145 out:
146 cnss_pr_err("%s req %x, ret %d\n", err_msg, req_id, ret);
147 return ret;
148 }
149
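/*
 * Register with WLAN firmware for the indications the platform driver wants
 * to receive (memory requests, FW init done, cal done, QDSS trace events,
 * etc.). Returns -EALREADY if firmware reports this client is already
 * registered.
 */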
150 static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
151 {
152 struct wlfw_ind_register_req_msg_v01 *req;
153 struct wlfw_ind_register_resp_msg_v01 *resp;
154 struct qmi_txn txn;
155 int ret = 0;
156
157 cnss_pr_dbg("Sending indication register message, state: 0x%lx\n",
158 plat_priv->driver_state);
159
160 req = kzalloc(sizeof(*req), GFP_KERNEL);
161 if (!req)
162 return -ENOMEM;
163
164 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
165 if (!resp) {
166 kfree(req);
167 return -ENOMEM;
168 }
169
170 req->client_id_valid = 1;
171 req->client_id = WLFW_CLIENT_ID;
172 req->request_mem_enable_valid = 1;
173 req->request_mem_enable = 1;
174 req->fw_mem_ready_enable_valid = 1;
175 req->fw_mem_ready_enable = 1;
176 /* fw_ready indication is replaced by fw_init_done in HST/HSP */
177 req->fw_init_done_enable_valid = 1;
178 req->fw_init_done_enable = 1;
179 req->pin_connect_result_enable_valid = 1;
180 req->pin_connect_result_enable = 1;
181 req->cal_done_enable_valid = 1;
182 req->cal_done_enable = 1;
183 req->qdss_trace_req_mem_enable_valid = 1;
184 req->qdss_trace_req_mem_enable = 1;
185 req->qdss_trace_save_enable_valid = 1;
186 req->qdss_trace_save_enable = 1;
187 req->qdss_trace_free_enable_valid = 1;
188 req->qdss_trace_free_enable = 1;
189 req->respond_get_info_enable_valid = 1;
190 req->respond_get_info_enable = 1;
191 req->wfc_call_twt_config_enable_valid = 1;
192 req->wfc_call_twt_config_enable = 1;
193 req->async_data_enable_valid = 1;
194 req->async_data_enable = 1;
195
196 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
197 wlfw_ind_register_resp_msg_v01_ei, resp);
198 if (ret < 0) {
199 cnss_pr_err("Failed to initialize txn for indication register request, err: %d\n",
200 ret);
201 goto out;
202 }
203
204 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
205 QMI_WLFW_IND_REGISTER_REQ_V01,
206 WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
207 wlfw_ind_register_req_msg_v01_ei, req);
208 if (ret < 0) {
209 qmi_txn_cancel(&txn);
210 cnss_pr_err("Failed to send indication register request, err: %d\n",
211 ret);
212 goto out;
213 }
214
215 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
216 if (ret < 0) {
217 cnss_pr_err("Failed to wait for response of indication register request, err: %d\n",
218 ret);
219 goto out;
220 }
221
222 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
223 cnss_pr_err("Indication register request failed, result: %d, err: %d\n",
224 resp->resp.result, resp->resp.error);
225 ret = -resp->resp.result;
226 goto out;
227 }
228
229 if (resp->fw_status_valid) {
230 if (resp->fw_status & QMI_WLFW_ALREADY_REGISTERED_V01) {
231 ret = -EALREADY;
232 goto qmi_registered;
233 }
234 }
235
236 kfree(req);
237 kfree(resp);
238 return 0;
239
240 out:
241 CNSS_QMI_ASSERT();
242
243 qmi_registered:
244 kfree(req);
245 kfree(resp);
246 return ret;
247 }
248
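/*
 * Populate the MLO (multi-link operation) fields of the host capability
 * request for chips that support it (KIWI/MANGO/PEACH): a single chip with
 * two local links and a host-synced maximum MLO peer count.
 */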
249 static void cnss_wlfw_host_cap_parse_mlo(struct cnss_plat_data *plat_priv,
250 struct wlfw_host_cap_req_msg_v01 *req)
251 {
252 if (plat_priv->device_id == KIWI_DEVICE_ID ||
253 plat_priv->device_id == MANGO_DEVICE_ID ||
254 plat_priv->device_id == PEACH_DEVICE_ID) {
255 req->mlo_capable_valid = 1;
256 req->mlo_capable = 1;
257 req->mlo_chip_id_valid = 1;
258 req->mlo_chip_id = 0;
259 req->mlo_group_id_valid = 1;
260 req->mlo_group_id = 0;
261 req->max_mlo_peer_valid = 1;
262 /* Max peer number generally won't change for the same device
263 * but needs to be synced with host driver.
264 */
265 req->max_mlo_peer = 32;
266 req->mlo_num_chips_valid = 1;
267 req->mlo_num_chips = 1;
268 req->mlo_chip_info_valid = 1;
269 req->mlo_chip_info[0].chip_id = 0;
270 req->mlo_chip_info[0].num_local_links = 2;
271 req->mlo_chip_info[0].hw_link_id[0] = 0;
272 req->mlo_chip_info[0].hw_link_id[1] = 1;
273 req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
274 req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
275 }
276 }
277
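/*
 * Send the host capability message describing what the platform supports:
 * wake MSI, BDF/M3 support, calibration state, sleep clock selection, PCIe
 * link speed, DDR/IOVA ranges, host build type, MLO info and the feature
 * list.
 */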
278 static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
279 {
280 struct wlfw_host_cap_req_msg_v01 *req;
281 struct wlfw_host_cap_resp_msg_v01 *resp;
282 struct qmi_txn txn;
283 int ret = 0;
284 u64 iova_start = 0, iova_size = 0,
285 iova_ipa_start = 0, iova_ipa_size = 0;
286 u64 feature_list = 0;
287
288 cnss_pr_dbg("Sending host capability message, state: 0x%lx\n",
289 plat_priv->driver_state);
290
291 req = kzalloc(sizeof(*req), GFP_KERNEL);
292 if (!req)
293 return -ENOMEM;
294
295 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
296 if (!resp) {
297 kfree(req);
298 return -ENOMEM;
299 }
300
301 req->num_clients_valid = 1;
302 req->num_clients = 1;
303 cnss_pr_dbg("Number of clients is %d\n", req->num_clients);
304
305 req->wake_msi = cnss_bus_get_wake_irq(plat_priv);
306 if (req->wake_msi) {
307 cnss_pr_dbg("WAKE MSI base data is %d\n", req->wake_msi);
308 req->wake_msi_valid = 1;
309 }
310
311 req->bdf_support_valid = 1;
312 req->bdf_support = 1;
313
314 req->m3_support_valid = 1;
315 req->m3_support = 1;
316
317 req->m3_cache_support_valid = 1;
318 req->m3_cache_support = 1;
319
320 req->cal_done_valid = 1;
321 req->cal_done = plat_priv->cal_done;
322 cnss_pr_dbg("Calibration done is %d\n", plat_priv->cal_done);
323
324 if (plat_priv->sleep_clk) {
325 req->nm_modem_valid = 1;
326 /* Notify firmware about the sleep clock selection,
327 * nm_modem_bit[1] is used for this purpose.
328 */
329 req->nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
330 }
331
332 if (plat_priv->supported_link_speed) {
333 req->pcie_link_info_valid = 1;
334 req->pcie_link_info.pci_link_speed =
335 plat_priv->supported_link_speed;
336 cnss_pr_dbg("Supported link speed in Host Cap %d\n",
337 plat_priv->supported_link_speed);
338 }
339
340 if (cnss_bus_is_smmu_s1_enabled(plat_priv) &&
341 !cnss_bus_get_iova(plat_priv, &iova_start, &iova_size) &&
342 !cnss_bus_get_iova_ipa(plat_priv, &iova_ipa_start,
343 &iova_ipa_size)) {
344 req->ddr_range_valid = 1;
345 req->ddr_range[0].start = iova_start;
346 req->ddr_range[0].size = iova_size + iova_ipa_size;
347 cnss_pr_dbg("Sending iova starting 0x%llx with size 0x%llx\n",
348 req->ddr_range[0].start, req->ddr_range[0].size);
349 }
350
351 req->host_build_type_valid = 1;
352 req->host_build_type = cnss_get_host_build_type();
353
354 cnss_wlfw_host_cap_parse_mlo(plat_priv, req);
355
356 ret = cnss_get_feature_list(plat_priv, &feature_list);
357 if (!ret) {
358 req->feature_list_valid = 1;
359 req->feature_list = feature_list;
360 cnss_pr_dbg("Sending feature list 0x%llx\n",
361 req->feature_list);
362 }
363
364 if (cnss_get_platform_name(plat_priv, req->platform_name,
365 QMI_WLFW_MAX_PLATFORM_NAME_LEN_V01))
366 req->platform_name_valid = 1;
367
368 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
369 wlfw_host_cap_resp_msg_v01_ei, resp);
370 if (ret < 0) {
371 cnss_pr_err("Failed to initialize txn for host capability request, err: %d\n",
372 ret);
373 goto out;
374 }
375
376 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
377 QMI_WLFW_HOST_CAP_REQ_V01,
378 WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
379 wlfw_host_cap_req_msg_v01_ei, req);
380 if (ret < 0) {
381 qmi_txn_cancel(&txn);
382 cnss_pr_err("Failed to send host capability request, err: %d\n",
383 ret);
384 goto out;
385 }
386
387 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
388 if (ret < 0) {
389 cnss_pr_err("Failed to wait for response of host capability request, err: %d\n",
390 ret);
391 goto out;
392 }
393
394 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
395 cnss_pr_err("Host capability request failed, result: %d, err: %d\n",
396 resp->resp.result, resp->resp.error);
397 ret = -resp->resp.result;
398 goto out;
399 }
400
401 kfree(req);
402 kfree(resp);
403 return 0;
404
405 out:
406 CNSS_QMI_ASSERT();
407 kfree(req);
408 kfree(resp);
409 return ret;
410 }
411
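/*
 * Respond to the firmware's memory request with the physical address, size
 * and type of each segment the host has allocated in plat_priv->fw_mem.
 */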
412 int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
413 {
414 struct wlfw_respond_mem_req_msg_v01 *req;
415 struct wlfw_respond_mem_resp_msg_v01 *resp;
416 struct qmi_txn txn;
417 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
418 int ret = 0, i;
419
420 cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n",
421 plat_priv->driver_state);
422
423 req = kzalloc(sizeof(*req), GFP_KERNEL);
424 if (!req)
425 return -ENOMEM;
426
427 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
428 if (!resp) {
429 kfree(req);
430 return -ENOMEM;
431 }
432
433 if (plat_priv->fw_mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
434 cnss_pr_err("Invalid seg len %u\n", plat_priv->fw_mem_seg_len);
435 ret = -EINVAL;
436 goto out;
437 }
438
439 req->mem_seg_len = plat_priv->fw_mem_seg_len;
440 for (i = 0; i < req->mem_seg_len; i++) {
441 if (!fw_mem[i].pa || !fw_mem[i].size) {
442 if (fw_mem[i].type == 0) {
443 cnss_pr_err("Invalid memory for FW type, segment = %d\n",
444 i);
445 ret = -EINVAL;
446 goto out;
447 }
448 cnss_pr_err("Memory for FW is not available for type: %u\n",
449 fw_mem[i].type);
450 ret = -ENOMEM;
451 goto out;
452 }
453
454 cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
455 fw_mem[i].va, &fw_mem[i].pa,
456 fw_mem[i].size, fw_mem[i].type);
457
458 req->mem_seg[i].addr = fw_mem[i].pa;
459 req->mem_seg[i].size = fw_mem[i].size;
460 req->mem_seg[i].type = fw_mem[i].type;
461 }
462
463 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
464 wlfw_respond_mem_resp_msg_v01_ei, resp);
465 if (ret < 0) {
466 cnss_pr_err("Failed to initialize txn for respond memory request, err: %d\n",
467 ret);
468 goto out;
469 }
470
471 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
472 QMI_WLFW_RESPOND_MEM_REQ_V01,
473 WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN,
474 wlfw_respond_mem_req_msg_v01_ei, req);
475 if (ret < 0) {
476 qmi_txn_cancel(&txn);
477 cnss_pr_err("Failed to send respond memory request, err: %d\n",
478 ret);
479 goto out;
480 }
481
482 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
483 if (ret < 0) {
484 cnss_pr_err("Failed to wait for response of respond memory request, err: %d\n",
485 ret);
486 goto out;
487 }
488
489 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
490 cnss_pr_err("Respond memory request failed, result: %d, err: %d\n",
491 resp->resp.result, resp->resp.error);
492 ret = -resp->resp.result;
493 goto out;
494 }
495
496 kfree(req);
497 kfree(resp);
498 return 0;
499
500 out:
501 CNSS_QMI_ASSERT();
502 kfree(req);
503 kfree(resp);
504 return ret;
505 }
506
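/*
 * Query target capabilities from firmware and cache them in plat_priv:
 * chip/board/SoC IDs, FW version and build ID, CPR voltage, device
 * frequency, OTP version, device memory ranges, FW capability bits and hang
 * event parameters. Also disables WLAN PDC via AOP for boards that use an
 * on-chip PMIC.
 */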
507 int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
508 {
509 struct wlfw_cap_req_msg_v01 *req;
510 struct wlfw_cap_resp_msg_v01 *resp;
511 struct qmi_txn txn;
512 char *fw_build_timestamp;
513 int ret = 0, i;
514
515 cnss_pr_dbg("Sending target capability message, state: 0x%lx\n",
516 plat_priv->driver_state);
517
518 req = kzalloc(sizeof(*req), GFP_KERNEL);
519 if (!req)
520 return -ENOMEM;
521
522 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
523 if (!resp) {
524 kfree(req);
525 return -ENOMEM;
526 }
527
528 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
529 wlfw_cap_resp_msg_v01_ei, resp);
530 if (ret < 0) {
531 cnss_pr_err("Failed to initialize txn for target capability request, err: %d\n",
532 ret);
533 goto out;
534 }
535
536 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
537 QMI_WLFW_CAP_REQ_V01,
538 WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
539 wlfw_cap_req_msg_v01_ei, req);
540 if (ret < 0) {
541 qmi_txn_cancel(&txn);
542 cnss_pr_err("Failed to send respond target capability request, err: %d\n",
543 ret);
544 goto out;
545 }
546
547 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
548 if (ret < 0) {
549 cnss_pr_err("Failed to wait for response of target capability request, err: %d\n",
550 ret);
551 goto out;
552 }
553
554 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
555 cnss_pr_err("Target capability request failed, result: %d, err: %d\n",
556 resp->resp.result, resp->resp.error);
557 ret = -resp->resp.result;
558 goto out;
559 }
560
561 if (resp->chip_info_valid) {
562 plat_priv->chip_info.chip_id = resp->chip_info.chip_id;
563 plat_priv->chip_info.chip_family = resp->chip_info.chip_family;
564 }
565 if (resp->board_info_valid)
566 plat_priv->board_info.board_id = resp->board_info.board_id;
567 else
568 plat_priv->board_info.board_id = 0xFF;
569 if (resp->soc_info_valid)
570 plat_priv->soc_info.soc_id = resp->soc_info.soc_id;
571 if (resp->fw_version_info_valid) {
572 plat_priv->fw_version_info.fw_version =
573 resp->fw_version_info.fw_version;
574 fw_build_timestamp = resp->fw_version_info.fw_build_timestamp;
575 fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN] = '\0';
576 strlcpy(plat_priv->fw_version_info.fw_build_timestamp,
577 resp->fw_version_info.fw_build_timestamp,
578 QMI_WLFW_MAX_TIMESTAMP_LEN + 1);
579 }
580 if (resp->fw_build_id_valid) {
581 resp->fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN] = '\0';
582 strlcpy(plat_priv->fw_build_id, resp->fw_build_id,
583 QMI_WLFW_MAX_BUILD_ID_LEN + 1);
584 }
585 /* FW will send AOP retention voltage for QCA6490 */
586 if (resp->voltage_mv_valid) {
587 plat_priv->cpr_info.voltage = resp->voltage_mv;
588 cnss_pr_dbg("Voltage for CPR: %dmV\n",
589 plat_priv->cpr_info.voltage);
590 cnss_update_cpr_info(plat_priv);
591 }
592 if (resp->time_freq_hz_valid) {
593 plat_priv->device_freq_hz = resp->time_freq_hz;
594 cnss_pr_dbg("Device frequency is %d HZ\n",
595 plat_priv->device_freq_hz);
596 }
597 if (resp->otp_version_valid)
598 plat_priv->otp_version = resp->otp_version;
599 if (resp->dev_mem_info_valid) {
600 for (i = 0; i < QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) {
601 plat_priv->dev_mem_info[i].start =
602 resp->dev_mem_info[i].start;
603 plat_priv->dev_mem_info[i].size =
604 resp->dev_mem_info[i].size;
605 cnss_pr_buf("Device memory info[%d]: start = 0x%llx, size = 0x%llx\n",
606 i, plat_priv->dev_mem_info[i].start,
607 plat_priv->dev_mem_info[i].size);
608 }
609 }
610 if (resp->fw_caps_valid) {
611 plat_priv->fw_pcie_gen_switch =
612 !!(resp->fw_caps & QMI_WLFW_HOST_PCIE_GEN_SWITCH_V01);
613 plat_priv->fw_aux_uc_support =
614 !!(resp->fw_caps & QMI_WLFW_AUX_UC_SUPPORT_V01);
615 cnss_pr_dbg("FW aux uc support capability: %d\n",
616 plat_priv->fw_aux_uc_support);
617 plat_priv->fw_caps = resp->fw_caps;
618 }
619
620 if (resp->hang_data_length_valid &&
621 resp->hang_data_length &&
622 resp->hang_data_length <= WLFW_MAX_HANG_EVENT_DATA_SIZE)
623 plat_priv->hang_event_data_len = resp->hang_data_length;
624 else
625 plat_priv->hang_event_data_len = 0;
626
627 if (resp->hang_data_addr_offset_valid)
628 plat_priv->hang_data_addr_offset = resp->hang_data_addr_offset;
629 else
630 plat_priv->hang_data_addr_offset = 0;
631
632 if (resp->hwid_bitmap_valid)
633 plat_priv->hwid_bitmap = resp->hwid_bitmap;
634
635 if (resp->ol_cpr_cfg_valid)
636 cnss_aop_ol_cpr_cfg_setup(plat_priv, &resp->ol_cpr_cfg);
637
638 /* Disable WLAN PDC in AOP firmware for boards that support an on-chip PMIC,
639 * so AOP ignores SW_CTRL changes and does not update regulator votes.
640 */
641 for (i = 0; i < plat_priv->on_chip_pmic_devices_count; i++) {
642 if (plat_priv->board_info.board_id ==
643 plat_priv->on_chip_pmic_board_ids[i]) {
644 cnss_pr_dbg("Disabling WLAN PDC for board_id: %02x\n",
645 plat_priv->board_info.board_id);
646 ret = cnss_aop_send_msg(plat_priv,
647 "{class: wlan_pdc, ss: rf, res: pdc, enable: 0}");
648 if (ret < 0)
649 cnss_pr_dbg("Failed to Send AOP Msg");
650 break;
651 }
652 }
653
654 if (resp->serial_id_valid) {
655 plat_priv->serial_id = resp->serial_id;
656 cnss_pr_info("serial id 0x%x 0x%x\n",
657 resp->serial_id.serial_id_msb,
658 resp->serial_id.serial_id_lsb);
659 }
660
661 cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, otp_version: 0x%x\n",
662 plat_priv->chip_info.chip_id,
663 plat_priv->chip_info.chip_family,
664 plat_priv->board_info.board_id, plat_priv->soc_info.soc_id,
665 plat_priv->otp_version);
666 cnss_pr_dbg("fw_version: 0x%x, fw_build_timestamp: %s, fw_build_id: %s, hwid_bitmap:0x%x\n",
667 plat_priv->fw_version_info.fw_version,
668 plat_priv->fw_version_info.fw_build_timestamp,
669 plat_priv->fw_build_id,
670 plat_priv->hwid_bitmap);
671 cnss_pr_dbg("Hang event params, Length: 0x%x, Offset Address: 0x%x\n",
672 plat_priv->hang_event_data_len,
673 plat_priv->hang_data_addr_offset);
674
675 kfree(req);
676 kfree(resp);
677 return 0;
678
679 out:
680 CNSS_QMI_ASSERT();
681 kfree(req);
682 kfree(resp);
683 return ret;
684 }
685
686 static char *cnss_bdf_type_to_str(enum cnss_bdf_type bdf_type)
687 {
688 switch (bdf_type) {
689 case CNSS_BDF_BIN:
690 case CNSS_BDF_ELF:
691 return "BDF";
692 case CNSS_BDF_REGDB:
693 return "REGDB";
694 case CNSS_BDF_HDS:
695 return "HDS";
696 default:
697 return "UNKNOWN";
698 }
699 }
700
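/*
 * Build the firmware file name for the requested BDF type. For BDF ELF/BIN
 * images the name is derived from the board ID and the GF bit of the chip
 * ID; REGDB and HDS use fixed names. The bus-specific prefix is added by
 * cnss_bus_add_fw_prefix_name().
 */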
701 static int cnss_get_bdf_file_name(struct cnss_plat_data *plat_priv,
702 u32 bdf_type, char *filename,
703 u32 filename_len)
704 {
705 char filename_tmp[MAX_FIRMWARE_NAME_LEN];
706 int ret = 0;
707
708 switch (bdf_type) {
709 case CNSS_BDF_ELF:
710 /* Board ID will be less than or equal to 0xFF in the GF mask case */
711 if (plat_priv->board_info.board_id == 0xFF) {
712 if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
713 snprintf(filename_tmp, filename_len,
714 ELF_BDF_FILE_NAME_GF);
715 else
716 snprintf(filename_tmp, filename_len,
717 ELF_BDF_FILE_NAME);
718 } else if (plat_priv->board_info.board_id < 0xFF) {
719 if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
720 snprintf(filename_tmp, filename_len,
721 ELF_BDF_FILE_NAME_GF_PREFIX "%02x",
722 plat_priv->board_info.board_id);
723 else
724 snprintf(filename_tmp, filename_len,
725 ELF_BDF_FILE_NAME_PREFIX "%02x",
726 plat_priv->board_info.board_id);
727 } else {
728 snprintf(filename_tmp, filename_len,
729 BDF_FILE_NAME_PREFIX "%02x.e%02x",
730 plat_priv->board_info.board_id >> 8 & 0xFF,
731 plat_priv->board_info.board_id & 0xFF);
732 }
733 break;
734 case CNSS_BDF_BIN:
735 if (plat_priv->board_info.board_id == 0xFF) {
736 if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
737 snprintf(filename_tmp, filename_len,
738 BIN_BDF_FILE_NAME_GF);
739 else
740 snprintf(filename_tmp, filename_len,
741 BIN_BDF_FILE_NAME);
742 } else if (plat_priv->board_info.board_id < 0xFF) {
743 if (plat_priv->chip_info.chip_id & CHIP_ID_GF_MASK)
744 snprintf(filename_tmp, filename_len,
745 BIN_BDF_FILE_NAME_GF_PREFIX "%02x",
746 plat_priv->board_info.board_id);
747 else
748 snprintf(filename_tmp, filename_len,
749 BIN_BDF_FILE_NAME_PREFIX "%02x",
750 plat_priv->board_info.board_id);
751 } else {
752 snprintf(filename_tmp, filename_len,
753 BDF_FILE_NAME_PREFIX "%02x.b%02x",
754 plat_priv->board_info.board_id >> 8 & 0xFF,
755 plat_priv->board_info.board_id & 0xFF);
756 }
757 break;
758 case CNSS_BDF_REGDB:
759 snprintf(filename_tmp, filename_len, REGDB_FILE_NAME);
760 break;
761 case CNSS_BDF_HDS:
762 snprintf(filename_tmp, filename_len, HDS_FILE_NAME);
763 break;
764 default:
765 cnss_pr_err("Invalid BDF type: %d\n",
766 plat_priv->ctrl_params.bdf_type);
767 ret = -EINVAL;
768 break;
769 }
770
771 if (!ret)
772 cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
773
774 return ret;
775 }
776
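/*
 * Load the requested BDF/REGDB/HDS file from the filesystem and push it to
 * firmware in QMI_WLFW_MAX_DATA_SIZE_V01 sized segments via
 * QMI_WLFW_BDF_DOWNLOAD_REQ_V01, marking the last segment with 'end'.
 */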
777 int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
778 u32 bdf_type)
779 {
780 struct wlfw_bdf_download_req_msg_v01 *req;
781 struct wlfw_bdf_download_resp_msg_v01 *resp;
782 struct qmi_txn txn;
783 char filename[MAX_FIRMWARE_NAME_LEN];
784 const struct firmware *fw_entry = NULL;
785 const u8 *temp;
786 unsigned int remaining;
787 int ret = 0;
788
789 cnss_pr_dbg("Sending QMI_WLFW_BDF_DOWNLOAD_REQ_V01 message for bdf_type: %d (%s), state: 0x%lx\n",
790 bdf_type, cnss_bdf_type_to_str(bdf_type), plat_priv->driver_state);
791
792 req = kzalloc(sizeof(*req), GFP_KERNEL);
793 if (!req)
794 return -ENOMEM;
795
796 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
797 if (!resp) {
798 kfree(req);
799 return -ENOMEM;
800 }
801
802 ret = cnss_get_bdf_file_name(plat_priv, bdf_type,
803 filename, sizeof(filename));
804 if (ret)
805 goto err_req_fw;
806
807 cnss_pr_dbg("Invoke firmware_request_nowarn for %s\n", filename);
808 if (bdf_type == CNSS_BDF_REGDB)
809 ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
810 filename);
811 else
812 ret = firmware_request_nowarn(&fw_entry, filename,
813 &plat_priv->plat_dev->dev);
814
815 if (ret) {
816 cnss_pr_err("Failed to load %s: %s, ret: %d\n",
817 cnss_bdf_type_to_str(bdf_type), filename, ret);
818 goto err_req_fw;
819 }
820
821 temp = fw_entry->data;
822 remaining = fw_entry->size;
823
824 cnss_pr_dbg("Downloading %s: %s, size: %u\n",
825 cnss_bdf_type_to_str(bdf_type), filename, remaining);
826
827 while (remaining) {
828 req->valid = 1;
829 req->file_id_valid = 1;
830 req->file_id = plat_priv->board_info.board_id;
831 req->total_size_valid = 1;
832 req->total_size = remaining;
833 req->seg_id_valid = 1;
834 req->data_valid = 1;
835 req->end_valid = 1;
836 req->bdf_type_valid = 1;
837 req->bdf_type = bdf_type;
838
839 if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
840 req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
841 } else {
842 req->data_len = remaining;
843 req->end = 1;
844 }
845
846 memcpy(req->data, temp, req->data_len);
847
848 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
849 wlfw_bdf_download_resp_msg_v01_ei, resp);
850 if (ret < 0) {
851 cnss_pr_err("Failed to initialize txn for QMI_WLFW_BDF_DOWNLOAD_REQ_V01 request for %s, error: %d\n",
852 cnss_bdf_type_to_str(bdf_type), ret);
853 goto err_send;
854 }
855
856 ret = qmi_send_request
857 (&plat_priv->qmi_wlfw, NULL, &txn,
858 QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
859 WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
860 wlfw_bdf_download_req_msg_v01_ei, req);
861 if (ret < 0) {
862 qmi_txn_cancel(&txn);
863 cnss_pr_err("Failed to send QMI_WLFW_BDF_DOWNLOAD_REQ_V01 request for %s, error: %d\n",
864 cnss_bdf_type_to_str(bdf_type), ret);
865 goto err_send;
866 }
867
868 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
869 if (ret < 0) {
870 cnss_pr_err("Timeout while waiting for FW response for QMI_WLFW_BDF_DOWNLOAD_REQ_V01 request for %s, err: %d\n",
871 cnss_bdf_type_to_str(bdf_type), ret);
872 goto err_send;
873 }
874
875 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
876 cnss_pr_err("FW response for QMI_WLFW_BDF_DOWNLOAD_REQ_V01 request for %s failed, result: %d, err: %d\n",
877 cnss_bdf_type_to_str(bdf_type), resp->resp.result,
878 resp->resp.error);
879 ret = -resp->resp.result;
880 goto err_send;
881 }
882
883 remaining -= req->data_len;
884 temp += req->data_len;
885 req->seg_id++;
886 }
887
888 release_firmware(fw_entry);
889
890 if (resp->host_bdf_data_valid) {
891 /* QCA6490: enable the S3E regulator for IPA configuration only */
892 if (!(resp->host_bdf_data & QMI_WLFW_HW_XPA_V01))
893 cnss_enable_int_pow_amp_vreg(plat_priv);
894
895 plat_priv->cbc_file_download =
896 resp->host_bdf_data & QMI_WLFW_CBC_FILE_DOWNLOAD_V01;
897 cnss_pr_info("Host BDF config: HW_XPA: %d CalDB: %d\n",
898 resp->host_bdf_data & QMI_WLFW_HW_XPA_V01,
899 plat_priv->cbc_file_download);
900 }
901 kfree(req);
902 kfree(resp);
903 return 0;
904
905 err_send:
906 release_firmware(fw_entry);
907 err_req_fw:
908 if (!(bdf_type == CNSS_BDF_REGDB ||
909 test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state) ||
910 ret == -EAGAIN))
911 CNSS_QMI_ASSERT();
912 kfree(req);
913 kfree(resp);
914 return ret;
915 }
916
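/*
 * Pass the pre-allocated TME-L patch memory region (physical address and
 * size) to firmware. Only applicable to the PEACH device; a no-op otherwise.
 */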
917 int cnss_wlfw_tme_patch_dnld_send_sync(struct cnss_plat_data *plat_priv,
918 enum wlfw_tme_lite_file_type_v01 file)
919 {
920 struct wlfw_tme_lite_info_req_msg_v01 *req;
921 struct wlfw_tme_lite_info_resp_msg_v01 *resp;
922 struct qmi_txn txn;
923 struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
924 int ret = 0;
925
926 cnss_pr_dbg("Sending TME patch information message, state: 0x%lx\n",
927 plat_priv->driver_state);
928
929 if (plat_priv->device_id != PEACH_DEVICE_ID)
930 return 0;
931
932 req = kzalloc(sizeof(*req), GFP_KERNEL);
933 if (!req)
934 return -ENOMEM;
935
936 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
937 if (!resp) {
938 kfree(req);
939 return -ENOMEM;
940 }
941
942 if (!tme_lite_mem->pa || !tme_lite_mem->size) {
943 cnss_pr_err("Memory for TME patch is not available\n");
944 ret = -ENOMEM;
945 goto out;
946 }
947
948 cnss_pr_dbg("TME-L patch memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
949 tme_lite_mem->va, &tme_lite_mem->pa, tme_lite_mem->size);
950
951 req->tme_file = file;
952 req->addr = plat_priv->tme_lite_mem.pa;
953 req->size = plat_priv->tme_lite_mem.size;
954
955 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
956 wlfw_tme_lite_info_resp_msg_v01_ei, resp);
957 if (ret < 0) {
958 cnss_pr_err("Failed to initialize txn for TME patch information request, err: %d\n",
959 ret);
960 goto out;
961 }
962
963 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
964 QMI_WLFW_TME_LITE_INFO_REQ_V01,
965 WLFW_TME_LITE_INFO_REQ_MSG_V01_MAX_MSG_LEN,
966 wlfw_tme_lite_info_req_msg_v01_ei, req);
967 if (ret < 0) {
968 qmi_txn_cancel(&txn);
969 cnss_pr_err("Failed to send TME patch information request, err: %d\n",
970 ret);
971 goto out;
972 }
973
974 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
975 if (ret < 0) {
976 cnss_pr_err("Failed to wait for response of TME patch information request, err: %d\n",
977 ret);
978 goto out;
979 }
980
981 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
982 cnss_pr_err("TME patch information request failed, result: %d, err: %d\n",
983 resp->resp.result, resp->resp.error);
984 ret = -resp->resp.result;
985 goto out;
986 }
987
988 kfree(req);
989 kfree(resp);
990 return 0;
991
992 out:
993 kfree(req);
994 kfree(resp);
995 return ret;
996 }
997
998 int cnss_wlfw_tme_opt_file_dnld_send_sync(struct cnss_plat_data *plat_priv,
999 enum wlfw_tme_lite_file_type_v01 file)
1000 {
1001 struct wlfw_tme_lite_info_req_msg_v01 *req;
1002 struct wlfw_tme_lite_info_resp_msg_v01 *resp;
1003 struct qmi_txn txn;
1004 struct cnss_fw_mem *tme_opt_file_mem = NULL;
1005 char *file_name = NULL;
1006 int ret = 0;
1007
1008 if (plat_priv->device_id != PEACH_DEVICE_ID)
1009 return 0;
1010
1011 cnss_pr_dbg("Sending TME opt file information message, state: 0x%lx\n",
1012 plat_priv->driver_state);
1013
1014 req = kzalloc(sizeof(*req), GFP_KERNEL);
1015 if (!req)
1016 return -ENOMEM;
1017
1018 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1019 if (!resp) {
1020 kfree(req);
1021 return -ENOMEM;
1022 }
1023
1024 if (file == WLFW_TME_LITE_OEM_FUSE_FILE_V01) {
1025 tme_opt_file_mem = &plat_priv->tme_opt_file_mem[0];
1026 file_name = TME_OEM_FUSE_FILE_NAME;
1027 } else if (file == WLFW_TME_LITE_RPR_FILE_V01) {
1028 tme_opt_file_mem = &plat_priv->tme_opt_file_mem[1];
1029 file_name = TME_RPR_FILE_NAME;
1030 } else if (file == WLFW_TME_LITE_DPR_FILE_V01) {
1031 tme_opt_file_mem = &plat_priv->tme_opt_file_mem[2];
1032 file_name = TME_DPR_FILE_NAME;
1033 }
1034
1035 if (!tme_opt_file_mem || !tme_opt_file_mem->pa ||
1036 !tme_opt_file_mem->size) {
1037 cnss_pr_err("Memory for TME opt file is not available\n");
1038 ret = -ENOMEM;
1039 goto out;
1040 }
1041
1042 cnss_pr_dbg("TME opt file %s memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
1043 file_name, tme_opt_file_mem->va, &tme_opt_file_mem->pa, tme_opt_file_mem->size);
1044
1045 req->tme_file = file;
1046 req->addr = tme_opt_file_mem->pa;
1047 req->size = tme_opt_file_mem->size;
1048
1049 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
1050 wlfw_tme_lite_info_resp_msg_v01_ei, resp);
1051 if (ret < 0) {
1052 cnss_pr_err("Failed to initialize txn for TME opt file information request, err: %d\n",
1053 ret);
1054 goto out;
1055 }
1056
1057 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
1058 QMI_WLFW_TME_LITE_INFO_REQ_V01,
1059 WLFW_TME_LITE_INFO_REQ_MSG_V01_MAX_MSG_LEN,
1060 wlfw_tme_lite_info_req_msg_v01_ei, req);
1061 if (ret < 0) {
1062 qmi_txn_cancel(&txn);
1063 cnss_pr_err("Failed to send TME opt file information request, err: %d\n",
1064 ret);
1065 goto out;
1066 }
1067
1068 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
1069 if (ret < 0) {
1070 cnss_pr_err("Failed to wait for response of TME opt file information request, err: %d\n",
1071 ret);
1072 goto out;
1073 }
1074
1075 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
1076 ret = -resp->resp.result;
1077 if (resp->resp.error == QMI_ERR_HARDWARE_RESTRICTED_V01) {
1078 cnss_pr_err("TME Power On failed\n");
1079 goto out;
1080 } else if (resp->resp.error == QMI_ERR_ENOMEM_V01) {
1081 cnss_pr_err("malloc SRAM failed\n");
1082 goto out;
1083 }
1084 cnss_pr_err("TME opt file information request failed, result: %d, err: %d\n",
1085 resp->resp.result, resp->resp.error);
1086 goto out;
1087 }
1088
1089 kfree(req);
1090 kfree(resp);
1091 return 0;
1092
1093 out:
1094 CNSS_QMI_ASSERT();
1095 kfree(req);
1096 kfree(resp);
1097 return ret;
1098 }
1099
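/*
 * Hand the pre-allocated M3 firmware memory region (physical address and
 * size) to WLAN firmware via QMI_WLFW_M3_INFO_REQ_V01.
 */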
1100 int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv)
1101 {
1102 struct wlfw_m3_info_req_msg_v01 *req;
1103 struct wlfw_m3_info_resp_msg_v01 *resp;
1104 struct qmi_txn txn;
1105 struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
1106 int ret = 0;
1107
1108 cnss_pr_dbg("Sending M3 information message, state: 0x%lx\n",
1109 plat_priv->driver_state);
1110
1111 req = kzalloc(sizeof(*req), GFP_KERNEL);
1112 if (!req)
1113 return -ENOMEM;
1114
1115 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1116 if (!resp) {
1117 kfree(req);
1118 return -ENOMEM;
1119 }
1120
1121 if (!m3_mem->pa || !m3_mem->size) {
1122 cnss_pr_err("Memory for M3 is not available\n");
1123 ret = -ENOMEM;
1124 goto out;
1125 }
1126
1127 cnss_pr_dbg("M3 memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
1128 m3_mem->va, &m3_mem->pa, m3_mem->size);
1129
1130 req->addr = plat_priv->m3_mem.pa;
1131 req->size = plat_priv->m3_mem.size;
1132
1133 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
1134 wlfw_m3_info_resp_msg_v01_ei, resp);
1135 if (ret < 0) {
1136 cnss_pr_err("Failed to initialize txn for M3 information request, err: %d\n",
1137 ret);
1138 goto out;
1139 }
1140
1141 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
1142 QMI_WLFW_M3_INFO_REQ_V01,
1143 WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
1144 wlfw_m3_info_req_msg_v01_ei, req);
1145 if (ret < 0) {
1146 qmi_txn_cancel(&txn);
1147 cnss_pr_err("Failed to send M3 information request, err: %d\n",
1148 ret);
1149 goto out;
1150 }
1151
1152 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
1153 if (ret < 0) {
1154 cnss_pr_err("Failed to wait for response of M3 information request, err: %d\n",
1155 ret);
1156 goto out;
1157 }
1158
1159 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
1160 cnss_pr_err("M3 information request failed, result: %d, err: %d\n",
1161 resp->resp.result, resp->resp.error);
1162 ret = -resp->resp.result;
1163 goto out;
1164 }
1165
1166 kfree(req);
1167 kfree(resp);
1168 return 0;
1169
1170 out:
1171 CNSS_QMI_ASSERT();
1172 kfree(req);
1173 kfree(resp);
1174 return ret;
1175 }
1176
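/*
 * Pass the AUX microcontroller firmware memory region to WLAN firmware via
 * QMI_WLFW_AUX_UC_INFO_REQ_V01.
 */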
1177 int cnss_wlfw_aux_dnld_send_sync(struct cnss_plat_data *plat_priv)
1178 {
1179 struct wlfw_aux_uc_info_req_msg_v01 *req;
1180 struct wlfw_aux_uc_info_resp_msg_v01 *resp;
1181 struct qmi_txn txn;
1182 struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
1183 int ret = 0;
1184
1185 cnss_pr_dbg("Sending QMI_WLFW_AUX_UC_INFO_REQ_V01 message, state: 0x%lx\n",
1186 plat_priv->driver_state);
1187
1188 req = kzalloc(sizeof(*req), GFP_KERNEL);
1189 if (!req)
1190 return -ENOMEM;
1191
1192 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1193 if (!resp) {
1194 kfree(req);
1195 return -ENOMEM;
1196 }
1197
1198 if (!aux_mem->pa || !aux_mem->size) {
1199 cnss_pr_err("Memory for AUX is not available\n");
1200 ret = -ENOMEM;
1201 goto out;
1202 }
1203
1204 cnss_pr_dbg("AUX memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
1205 aux_mem->va, &aux_mem->pa, aux_mem->size);
1206
1207 req->addr = plat_priv->aux_mem.pa;
1208 req->size = plat_priv->aux_mem.size;
1209
1210 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
1211 wlfw_aux_uc_info_resp_msg_v01_ei, resp);
1212 if (ret < 0) {
1213 cnss_pr_err("Failed to initialize txn for QMI_WLFW_AUX_UC_INFO_REQ_V01 request, err: %d\n",
1214 ret);
1215 goto out;
1216 }
1217
1218 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
1219 QMI_WLFW_AUX_UC_INFO_REQ_V01,
1220 WLFW_AUX_UC_INFO_REQ_MSG_V01_MAX_MSG_LEN,
1221 wlfw_aux_uc_info_req_msg_v01_ei, req);
1222 if (ret < 0) {
1223 qmi_txn_cancel(&txn);
1224 cnss_pr_err("Failed to send QMI_WLFW_AUX_UC_INFO_REQ_V01 request, err: %d\n",
1225 ret);
1226 goto out;
1227 }
1228
1229 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
1230 if (ret < 0) {
1231 cnss_pr_err("Failed to wait for response of QMI_WLFW_AUX_UC_INFO_REQ_V01 request, err: %d\n",
1232 ret);
1233 goto out;
1234 }
1235
1236 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
1237 cnss_pr_err("QMI_WLFW_AUX_UC_INFO_REQ_V01 request failed, result: %d, err: %d\n",
1238 resp->resp.result, resp->resp.error);
1239 ret = -resp->resp.result;
1240 goto out;
1241 }
1242
1243 kfree(req);
1244 kfree(resp);
1245 return 0;
1246
1247 out:
1248 CNSS_QMI_ASSERT();
1249 kfree(req);
1250 kfree(resp);
1251 return ret;
1252 }
1253
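/*
 * Provision the WLAN MAC address in firmware. The caller must supply
 * exactly QMI_WLFW_MAC_ADDR_SIZE_V01 bytes.
 */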
1254 int cnss_wlfw_wlan_mac_req_send_sync(struct cnss_plat_data *plat_priv,
1255 u8 *mac, u32 mac_len)
1256 {
1257 struct wlfw_mac_addr_req_msg_v01 req;
1258 struct wlfw_mac_addr_resp_msg_v01 resp = {0};
1259 struct qmi_txn txn;
1260 int ret;
1261
1262 if (!plat_priv || !mac || mac_len != QMI_WLFW_MAC_ADDR_SIZE_V01)
1263 return -EINVAL;
1264
1265 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
1266 wlfw_mac_addr_resp_msg_v01_ei, &resp);
1267 if (ret < 0) {
1268 cnss_pr_err("Failed to initialize txn for mac req, err: %d\n",
1269 ret);
1270 ret = -EIO;
1271 goto out;
1272 }
1273
1274 cnss_pr_dbg("Sending WLAN mac req [%pM], state: 0x%lx\n",
1275 mac, plat_priv->driver_state);
1276 memcpy(req.mac_addr, mac, mac_len);
1277 req.mac_addr_valid = 1;
1278
1279 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
1280 QMI_WLFW_MAC_ADDR_REQ_V01,
1281 WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN,
1282 wlfw_mac_addr_req_msg_v01_ei, &req);
1283 if (ret < 0) {
1284 qmi_txn_cancel(&txn);
1285 cnss_pr_err("Failed to send mac req, err: %d\n", ret);
1286 ret = -EIO;
1287 goto out;
1288 }
1289
1290 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
1291 if (ret < 0) {
1292 cnss_pr_err("Failed to wait for resp of mac req, err: %d\n",
1293 ret);
1294 ret = -EIO;
1295 goto out;
1296 }
1297
1298 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
1299 cnss_pr_err("WLAN mac req failed, result: %d, err: %d\n",
1300 resp.resp.result, resp.resp.error);
1301 ret = -resp.resp.result;
1302 }
1303 out:
1304 return ret;
1305 }
1306
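/*
 * Pull QDSS trace data from firmware segment by segment, validate each
 * response against the expected total size and segment ID, and forward the
 * assembled buffer to userspace through a generic netlink message.
 */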
1307 int cnss_wlfw_qdss_data_send_sync(struct cnss_plat_data *plat_priv, char *file_name,
1308 u32 total_size)
1309 {
1310 int ret = 0;
1311 struct wlfw_qdss_trace_data_req_msg_v01 *req;
1312 struct wlfw_qdss_trace_data_resp_msg_v01 *resp;
1313 unsigned char *p_qdss_trace_data_temp, *p_qdss_trace_data = NULL;
1314 unsigned int remaining;
1315 struct qmi_txn txn;
1316
1317 cnss_pr_dbg("%s\n", __func__);
1318
1319 req = kzalloc(sizeof(*req), GFP_KERNEL);
1320 if (!req)
1321 return -ENOMEM;
1322
1323 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1324 if (!resp) {
1325 kfree(req);
1326 return -ENOMEM;
1327 }
1328
1329 p_qdss_trace_data = kzalloc(total_size, GFP_KERNEL);
1330 if (!p_qdss_trace_data) {
1331 ret = -ENOMEM;
1332 goto end;
1333 }
1334
1335 remaining = total_size;
1336 p_qdss_trace_data_temp = p_qdss_trace_data;
1337 while (remaining && resp->end == 0) {
1338 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
1339 wlfw_qdss_trace_data_resp_msg_v01_ei, resp);
1340
1341 if (ret < 0) {
1342 cnss_pr_err("Fail to init txn for QDSS trace resp %d\n",
1343 ret);
1344 goto fail;
1345 }
1346
1347 ret = qmi_send_request
1348 (&plat_priv->qmi_wlfw, NULL, &txn,
1349 QMI_WLFW_QDSS_TRACE_DATA_REQ_V01,
1350 WLFW_QDSS_TRACE_DATA_REQ_MSG_V01_MAX_MSG_LEN,
1351 wlfw_qdss_trace_data_req_msg_v01_ei, req);
1352
1353 if (ret < 0) {
1354 qmi_txn_cancel(&txn);
1355 cnss_pr_err("Fail to send QDSS trace data req %d\n",
1356 ret);
1357 goto fail;
1358 }
1359
1360 ret = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout);
1361
1362 if (ret < 0) {
1363 cnss_pr_err("QDSS trace resp wait failed with rc %d\n",
1364 ret);
1365 goto fail;
1366 } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
1367 cnss_pr_err("QMI QDSS trace request rejected, result:%d error:%d\n",
1368 resp->resp.result, resp->resp.error);
1369 ret = -resp->resp.result;
1370 goto fail;
1371 } else {
1372 ret = 0;
1373 }
1374
1375 cnss_pr_dbg("%s: response total size %d data len %d",
1376 __func__, resp->total_size, resp->data_len);
1377
1378 if ((resp->total_size_valid == 1 &&
1379 resp->total_size == total_size) &&
1380 (resp->seg_id_valid == 1 && resp->seg_id == req->seg_id) &&
1381 (resp->data_valid == 1 &&
1382 resp->data_len <= QMI_WLFW_MAX_DATA_SIZE_V01) &&
1383 resp->data_len <= remaining) {
1384 memcpy(p_qdss_trace_data_temp,
1385 resp->data, resp->data_len);
1386 } else {
1387 cnss_pr_err("%s: Unmatched qdss trace data, Expect total_size %u, seg_id %u, Recv total_size_valid %u, total_size %u, seg_id_valid %u, seg_id %u, data_len_valid %u, data_len %u",
1388 __func__,
1389 total_size, req->seg_id,
1390 resp->total_size_valid,
1391 resp->total_size,
1392 resp->seg_id_valid,
1393 resp->seg_id,
1394 resp->data_valid,
1395 resp->data_len);
1396 ret = -1;
1397 goto fail;
1398 }
1399
1400 remaining -= resp->data_len;
1401 p_qdss_trace_data_temp += resp->data_len;
1402 req->seg_id++;
1403 }
1404
1405 if (remaining == 0 && (resp->end_valid && resp->end)) {
1406 ret = cnss_genl_send_msg(p_qdss_trace_data,
1407 CNSS_GENL_MSG_TYPE_QDSS, file_name,
1408 total_size);
1409 if (ret < 0) {
1410 cnss_pr_err("Fail to save QDSS trace data: %d\n",
1411 ret);
1412 ret = -1;
1413 goto fail;
1414 }
1415 } else {
1416 cnss_pr_err("%s: QDSS trace file corrupted: remaining %u, end_valid %u, end %u",
1417 __func__,
1418 remaining, resp->end_valid, resp->end);
1419 ret = -1;
1420 goto fail;
1421 }
1422
1423 fail:
1424 kfree(p_qdss_trace_data);
1425
1426 end:
1427 kfree(req);
1428 kfree(resp);
1429 return ret;
1430 }
1431
1432 void cnss_get_qdss_cfg_filename(struct cnss_plat_data *plat_priv,
1433 char *filename, u32 filename_len,
1434 bool fallback_file)
1435 {
1436 char filename_tmp[MAX_FIRMWARE_NAME_LEN];
1437 char *build_str = QDSS_FILE_BUILD_STR;
1438
1439 if (fallback_file)
1440 build_str = "";
1441
1442 if (plat_priv->device_version.major_version == FW_V2_NUMBER)
1443 snprintf(filename_tmp, filename_len, QDSS_TRACE_CONFIG_FILE
1444 "_%s%s.cfg", build_str, HW_V2_NUMBER);
1445 else
1446 snprintf(filename_tmp, filename_len, QDSS_TRACE_CONFIG_FILE
1447 "_%s%s.cfg", build_str, HW_V1_NUMBER);
1448
1449 cnss_bus_add_fw_prefix_name(plat_priv, filename, filename_tmp);
1450 }
1451
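/*
 * Download the QDSS trace configuration file to firmware in segments,
 * falling back to the default (build-type agnostic) config file if the
 * build-specific one is not present.
 */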
1452 int cnss_wlfw_qdss_dnld_send_sync(struct cnss_plat_data *plat_priv)
1453 {
1454 struct wlfw_qdss_trace_config_download_req_msg_v01 *req;
1455 struct wlfw_qdss_trace_config_download_resp_msg_v01 *resp;
1456 struct qmi_txn txn;
1457 const struct firmware *fw_entry = NULL;
1458 const u8 *temp;
1459 char qdss_cfg_filename[MAX_FIRMWARE_NAME_LEN];
1460 unsigned int remaining;
1461 int ret = 0;
1462
1463 cnss_pr_dbg("Sending QDSS config download message, state: 0x%lx\n",
1464 plat_priv->driver_state);
1465
1466 req = kzalloc(sizeof(*req), GFP_KERNEL);
1467 if (!req)
1468 return -ENOMEM;
1469
1470 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1471 if (!resp) {
1472 kfree(req);
1473 return -ENOMEM;
1474 }
1475
1476 cnss_get_qdss_cfg_filename(plat_priv, qdss_cfg_filename,
1477 sizeof(qdss_cfg_filename), false);
1478
1479 cnss_pr_dbg("Invoke firmware_request_nowarn for %s\n",
1480 qdss_cfg_filename);
1481 ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
1482 qdss_cfg_filename);
1483 if (ret) {
1484 cnss_pr_dbg("Unable to load %s ret %d, try default file\n",
1485 qdss_cfg_filename, ret);
1486 cnss_get_qdss_cfg_filename(plat_priv, qdss_cfg_filename,
1487 sizeof(qdss_cfg_filename),
1488 true);
1489 cnss_pr_dbg("Invoke firmware_request_nowarn for %s\n",
1490 qdss_cfg_filename);
1491 ret = cnss_request_firmware_direct(plat_priv, &fw_entry,
1492 qdss_cfg_filename);
1493 if (ret) {
1494 cnss_pr_err("Unable to load %s ret %d\n",
1495 qdss_cfg_filename, ret);
1496 goto err_req_fw;
1497 }
1498 }
1499
1500 temp = fw_entry->data;
1501 remaining = fw_entry->size;
1502
1503 cnss_pr_dbg("Downloading QDSS: %s, size: %u\n",
1504 qdss_cfg_filename, remaining);
1505
1506 while (remaining) {
1507 req->total_size_valid = 1;
1508 req->total_size = remaining;
1509 req->seg_id_valid = 1;
1510 req->data_valid = 1;
1511 req->end_valid = 1;
1512
1513 if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
1514 req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
1515 } else {
1516 req->data_len = remaining;
1517 req->end = 1;
1518 }
1519
1520 memcpy(req->data, temp, req->data_len);
1521
1522 ret = qmi_txn_init
1523 (&plat_priv->qmi_wlfw, &txn,
1524 wlfw_qdss_trace_config_download_resp_msg_v01_ei,
1525 resp);
1526 if (ret < 0) {
1527 cnss_pr_err("Failed to initialize txn for QDSS download request, err: %d\n",
1528 ret);
1529 goto err_send;
1530 }
1531
1532 ret = qmi_send_request
1533 (&plat_priv->qmi_wlfw, NULL, &txn,
1534 QMI_WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_V01,
1535 WLFW_QDSS_TRACE_CONFIG_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
1536 wlfw_qdss_trace_config_download_req_msg_v01_ei, req);
1537 if (ret < 0) {
1538 qmi_txn_cancel(&txn);
1539 cnss_pr_err("Failed to send respond QDSS download request, err: %d\n",
1540 ret);
1541 goto err_send;
1542 }
1543
1544 ret = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout);
1545 if (ret < 0) {
1546 cnss_pr_err("Failed to wait for response of QDSS download request, err: %d\n",
1547 ret);
1548 goto err_send;
1549 }
1550
1551 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
1552 cnss_pr_err("QDSS download request failed, result: %d, err: %d\n",
1553 resp->resp.result, resp->resp.error);
1554 ret = -resp->resp.result;
1555 goto err_send;
1556 }
1557
1558 remaining -= req->data_len;
1559 temp += req->data_len;
1560 req->seg_id++;
1561 }
1562
1563 release_firmware(fw_entry);
1564 kfree(req);
1565 kfree(resp);
1566 return 0;
1567
1568 err_send:
1569 release_firmware(fw_entry);
1570 err_req_fw:
1571
1572 kfree(req);
1573 kfree(resp);
1574 return ret;
1575 }
1576
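/*
 * Set the QDSS trace mode (on/off) in firmware, passing the trace option
 * bits and the host's HW trace disable override setting.
 */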
1577 static int wlfw_send_qdss_trace_mode_req
1578 (struct cnss_plat_data *plat_priv,
1579 enum wlfw_qdss_trace_mode_enum_v01 mode,
1580 unsigned long long option)
1581 {
1582 int rc = 0;
1583 int tmp = 0;
1584 struct wlfw_qdss_trace_mode_req_msg_v01 *req;
1585 struct wlfw_qdss_trace_mode_resp_msg_v01 *resp;
1586 struct qmi_txn txn;
1587
1588 if (!plat_priv)
1589 return -ENODEV;
1590
1591 req = kzalloc(sizeof(*req), GFP_KERNEL);
1592 if (!req)
1593 return -ENOMEM;
1594
1595 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1596 if (!resp) {
1597 kfree(req);
1598 return -ENOMEM;
1599 }
1600
1601 req->mode_valid = 1;
1602 req->mode = mode;
1603 req->option_valid = 1;
1604 req->option = option;
1605
1606 tmp = plat_priv->hw_trc_override;
1607
1608 req->hw_trc_disable_override_valid = 1;
1609 req->hw_trc_disable_override =
1610 (tmp > QMI_PARAM_DISABLE_V01 ? QMI_PARAM_DISABLE_V01 :
1611 (tmp < 0 ? QMI_PARAM_INVALID_V01 : tmp));
1612
1613 cnss_pr_dbg("%s: mode %u, option %llu, hw_trc_disable_override: %u",
1614 __func__, mode, option, req->hw_trc_disable_override);
1615
1616 rc = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
1617 wlfw_qdss_trace_mode_resp_msg_v01_ei, resp);
1618 if (rc < 0) {
1619 cnss_pr_err("Fail to init txn for QDSS Mode resp %d\n",
1620 rc);
1621 goto out;
1622 }
1623
1624 rc = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
1625 QMI_WLFW_QDSS_TRACE_MODE_REQ_V01,
1626 WLFW_QDSS_TRACE_MODE_REQ_MSG_V01_MAX_MSG_LEN,
1627 wlfw_qdss_trace_mode_req_msg_v01_ei, req);
1628 if (rc < 0) {
1629 qmi_txn_cancel(&txn);
1630 cnss_pr_err("Fail to send QDSS Mode req %d\n", rc);
1631 goto out;
1632 }
1633
1634 rc = qmi_txn_wait(&txn, plat_priv->ctrl_params.qmi_timeout);
1635 if (rc < 0) {
1636 cnss_pr_err("QDSS Mode resp wait failed with rc %d\n",
1637 rc);
1638 goto out;
1639 } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
1640 cnss_pr_err("QMI QDSS Mode request rejected, result:%d error:%d\n",
1641 resp->resp.result, resp->resp.error);
1642 rc = -resp->resp.result;
1643 goto out;
1644 }
1645
1646 kfree(resp);
1647 kfree(req);
1648 return rc;
1649 out:
1650 kfree(resp);
1651 kfree(req);
1652 CNSS_QMI_ASSERT();
1653 return rc;
1654 }
1655
1656 int wlfw_qdss_trace_start(struct cnss_plat_data *plat_priv)
1657 {
1658 return wlfw_send_qdss_trace_mode_req(plat_priv,
1659 QMI_WLFW_QDSS_TRACE_ON_V01, 0);
1660 }
1661
1662 int wlfw_qdss_trace_stop(struct cnss_plat_data *plat_priv, unsigned long long option)
1663 {
1664 return wlfw_send_qdss_trace_mode_req(plat_priv, QMI_WLFW_QDSS_TRACE_OFF_V01,
1665 option);
1666 }
1667
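/*
 * Tell firmware which driver mode to enter (mission, FTM, calibration, off,
 * etc.). A mode-off request is ignored while recovery is in progress, and a
 * failure to send mode-off is not treated as fatal.
 */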
1668 int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
1669 enum cnss_driver_mode mode)
1670 {
1671 struct wlfw_wlan_mode_req_msg_v01 *req;
1672 struct wlfw_wlan_mode_resp_msg_v01 *resp;
1673 struct qmi_txn txn;
1674 int ret = 0;
1675
1676 if (!plat_priv)
1677 return -ENODEV;
1678
1679 cnss_pr_dbg("Sending mode message, mode: %s(%d), state: 0x%lx\n",
1680 cnss_qmi_mode_to_str(mode), mode, plat_priv->driver_state);
1681
1682 if (mode == CNSS_OFF &&
1683 test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
1684 cnss_pr_dbg("Recovery is in progress, ignore mode off request\n");
1685 return 0;
1686 }
1687
1688 req = kzalloc(sizeof(*req), GFP_KERNEL);
1689 if (!req)
1690 return -ENOMEM;
1691
1692 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1693 if (!resp) {
1694 kfree(req);
1695 return -ENOMEM;
1696 }
1697
1698 req->mode = (enum wlfw_driver_mode_enum_v01)mode;
1699 req->hw_debug_valid = 1;
1700 req->hw_debug = 0;
1701
1702 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
1703 wlfw_wlan_mode_resp_msg_v01_ei, resp);
1704 if (ret < 0) {
1705 cnss_pr_err("Failed to initialize txn for mode request, mode: %s(%d), err: %d\n",
1706 cnss_qmi_mode_to_str(mode), mode, ret);
1707 goto out;
1708 }
1709
1710 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
1711 QMI_WLFW_WLAN_MODE_REQ_V01,
1712 WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
1713 wlfw_wlan_mode_req_msg_v01_ei, req);
1714 if (ret < 0) {
1715 qmi_txn_cancel(&txn);
1716 cnss_pr_err("Failed to send mode request, mode: %s(%d), err: %d\n",
1717 cnss_qmi_mode_to_str(mode), mode, ret);
1718 goto out;
1719 }
1720
1721 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
1722 if (ret < 0) {
1723 cnss_pr_err("Failed to wait for response of mode request, mode: %s(%d), err: %d\n",
1724 cnss_qmi_mode_to_str(mode), mode, ret);
1725 goto out;
1726 }
1727
1728 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
1729 cnss_pr_err("Mode request failed, mode: %s(%d), result: %d, err: %d\n",
1730 cnss_qmi_mode_to_str(mode), mode, resp->resp.result,
1731 resp->resp.error);
1732 ret = -resp->resp.result;
1733 goto out;
1734 }
1735
1736 kfree(req);
1737 kfree(resp);
1738 return 0;
1739
1740 out:
1741 if (mode == CNSS_OFF) {
1742 cnss_pr_dbg("WLFW service is disconnected while sending mode off request\n");
1743 ret = 0;
1744 } else {
1745 CNSS_QMI_ASSERT();
1746 }
1747 kfree(req);
1748 kfree(resp);
1749 return ret;
1750 }
1751
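/*
 * Send the WLAN configuration: host version, CE target and service pipe
 * configs, the shadow register list appropriate for the device generation
 * (v1/v2/v3), optional RRI-over-DDR config and the CE MSI vector mapping.
 */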
1752 int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
1753 struct cnss_wlan_enable_cfg *config,
1754 const char *host_version)
1755 {
1756 struct wlfw_wlan_cfg_req_msg_v01 *req;
1757 struct wlfw_wlan_cfg_resp_msg_v01 *resp;
1758 struct qmi_txn txn;
1759 u32 i, ce_id, num_vectors, user_base_data, base_vector;
1760 int ret = 0;
1761
1762 if (!plat_priv)
1763 return -ENODEV;
1764
1765 cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
1766 plat_priv->driver_state);
1767
1768 req = kzalloc(sizeof(*req), GFP_KERNEL);
1769 if (!req)
1770 return -ENOMEM;
1771
1772 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1773 if (!resp) {
1774 kfree(req);
1775 return -ENOMEM;
1776 }
1777
1778 req->host_version_valid = 1;
1779 strlcpy(req->host_version, host_version,
1780 QMI_WLFW_MAX_STR_LEN_V01 + 1);
1781
1782 req->tgt_cfg_valid = 1;
1783 if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
1784 req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
1785 else
1786 req->tgt_cfg_len = config->num_ce_tgt_cfg;
1787 for (i = 0; i < req->tgt_cfg_len; i++) {
1788 req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
1789 req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
1790 req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
1791 req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
1792 req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
1793 }
1794
1795 req->svc_cfg_valid = 1;
1796 if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
1797 req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
1798 else
1799 req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
1800 for (i = 0; i < req->svc_cfg_len; i++) {
1801 req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
1802 req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
1803 req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
1804 }
1805
1806 if (plat_priv->device_id != KIWI_DEVICE_ID &&
1807 plat_priv->device_id != MANGO_DEVICE_ID &&
1808 plat_priv->device_id != PEACH_DEVICE_ID) {
1809 if (plat_priv->device_id == QCN7605_DEVICE_ID &&
1810 config->num_shadow_reg_cfg) {
1811 req->shadow_reg_valid = 1;
1812 if (config->num_shadow_reg_cfg >
1813 QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
1814 req->shadow_reg_len =
1815 QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
1816 else
1817 req->shadow_reg_len =
1818 config->num_shadow_reg_cfg;
1819 memcpy(req->shadow_reg, config->shadow_reg_cfg,
1820 sizeof(struct wlfw_shadow_reg_cfg_s_v01) *
1821 req->shadow_reg_len);
1822 } else {
1823 req->shadow_reg_v2_valid = 1;
1824
1825 if (config->num_shadow_reg_v2_cfg >
1826 QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
1827 req->shadow_reg_v2_len =
1828 QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01;
1829 else
1830 req->shadow_reg_v2_len =
1831 config->num_shadow_reg_v2_cfg;
1832
1833 memcpy(req->shadow_reg_v2, config->shadow_reg_v2_cfg,
1834 sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01) *
1835 req->shadow_reg_v2_len);
1836 }
1837 } else {
1838 req->shadow_reg_v3_valid = 1;
1839 if (config->num_shadow_reg_v3_cfg >
1840 MAX_NUM_SHADOW_REG_V3)
1841 req->shadow_reg_v3_len = MAX_NUM_SHADOW_REG_V3;
1842 else
1843 req->shadow_reg_v3_len = config->num_shadow_reg_v3_cfg;
1844
1845 plat_priv->num_shadow_regs_v3 = req->shadow_reg_v3_len;
1846
1847 cnss_pr_dbg("Shadow reg v3 len: %d\n",
1848 plat_priv->num_shadow_regs_v3);
1849
1850 memcpy(req->shadow_reg_v3, config->shadow_reg_v3_cfg,
1851 sizeof(struct wlfw_shadow_reg_v3_cfg_s_v01) *
1852 req->shadow_reg_v3_len);
1853 }
1854
1855 if (config->rri_over_ddr_cfg_valid) {
1856 req->rri_over_ddr_cfg_valid = 1;
1857 req->rri_over_ddr_cfg.base_addr_low =
1858 config->rri_over_ddr_cfg.base_addr_low;
1859 req->rri_over_ddr_cfg.base_addr_high =
1860 config->rri_over_ddr_cfg.base_addr_high;
1861 }
1862 if (config->send_msi_ce) {
1863 ret = cnss_bus_get_msi_assignment(plat_priv,
1864 CE_MSI_NAME,
1865 &num_vectors,
1866 &user_base_data,
1867 &base_vector);
1868 if (!ret) {
1869 req->msi_cfg_valid = 1;
1870 req->msi_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
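/* Distribute CE IDs across the available MSI vectors round-robin,
 * starting from the base vector reported by the bus layer.
 */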
1871 for (ce_id = 0; ce_id < QMI_WLFW_MAX_NUM_CE_V01;
1872 ce_id++) {
1873 req->msi_cfg[ce_id].ce_id = ce_id;
1874 req->msi_cfg[ce_id].msi_vector =
1875 (ce_id % num_vectors) + base_vector;
1876 }
1877 }
1878 }
1879
1880 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
1881 wlfw_wlan_cfg_resp_msg_v01_ei, resp);
1882 if (ret < 0) {
1883 cnss_pr_err("Failed to initialize txn for WLAN config request, err: %d\n",
1884 ret);
1885 goto out;
1886 }
1887
1888 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
1889 QMI_WLFW_WLAN_CFG_REQ_V01,
1890 WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
1891 wlfw_wlan_cfg_req_msg_v01_ei, req);
1892 if (ret < 0) {
1893 qmi_txn_cancel(&txn);
1894 cnss_pr_err("Failed to send WLAN config request, err: %d\n",
1895 ret);
1896 goto out;
1897 }
1898
1899 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
1900 if (ret < 0) {
1901 cnss_pr_err("Failed to wait for response of WLAN config request, err: %d\n",
1902 ret);
1903 goto out;
1904 }
1905
1906 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
1907 cnss_pr_err("WLAN config request failed, result: %d, err: %d\n",
1908 resp->resp.result, resp->resp.error);
1909 ret = -resp->resp.result;
1910 goto out;
1911 }
1912
1913 kfree(req);
1914 kfree(resp);
1915 return 0;
1916
1917 out:
1918 CNSS_QMI_ASSERT();
1919 kfree(req);
1920 kfree(resp);
1921 return ret;
1922 }
1923
1924 int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
1925 u32 offset, u32 mem_type,
1926 u32 data_len, u8 *data)
1927 {
1928 struct wlfw_athdiag_read_req_msg_v01 *req;
1929 struct wlfw_athdiag_read_resp_msg_v01 *resp;
1930 struct qmi_txn txn;
1931 int ret = 0;
1932
1933 if (!plat_priv)
1934 return -ENODEV;
1935
1936 if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
1937 cnss_pr_err("Invalid parameters for athdiag read: data %pK, data_len %u\n",
1938 data, data_len);
1939 return -EINVAL;
1940 }
1941
1942 cnss_pr_dbg("athdiag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n",
1943 plat_priv->driver_state, offset, mem_type, data_len);
1944
1945 req = kzalloc(sizeof(*req), GFP_KERNEL);
1946 if (!req)
1947 return -ENOMEM;
1948
1949 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1950 if (!resp) {
1951 kfree(req);
1952 return -ENOMEM;
1953 }
1954
1955 req->offset = offset;
1956 req->mem_type = mem_type;
1957 req->data_len = data_len;
1958
1959 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
1960 wlfw_athdiag_read_resp_msg_v01_ei, resp);
1961 if (ret < 0) {
1962 cnss_pr_err("Failed to initialize txn for athdiag read request, err: %d\n",
1963 ret);
1964 goto out;
1965 }
1966
1967 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
1968 QMI_WLFW_ATHDIAG_READ_REQ_V01,
1969 WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN,
1970 wlfw_athdiag_read_req_msg_v01_ei, req);
1971 if (ret < 0) {
1972 qmi_txn_cancel(&txn);
1973 cnss_pr_err("Failed to send athdiag read request, err: %d\n",
1974 ret);
1975 goto out;
1976 }
1977
1978 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
1979 if (ret < 0) {
1980 cnss_pr_err("Failed to wait for response of athdiag read request, err: %d\n",
1981 ret);
1982 goto out;
1983 }
1984
1985 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
1986 cnss_pr_err("Athdiag read request failed, result: %d, err: %d\n",
1987 resp->resp.result, resp->resp.error);
1988 ret = -resp->resp.result;
1989 goto out;
1990 }
1991
1992 if (!resp->data_valid || resp->data_len != data_len) {
1993 cnss_pr_err("athdiag read data is invalid, data_valid = %u, data_len = %u\n",
1994 resp->data_valid, resp->data_len);
1995 ret = -EINVAL;
1996 goto out;
1997 }
1998
1999 memcpy(data, resp->data, resp->data_len);
2000
2001 kfree(req);
2002 kfree(resp);
2003 return 0;
2004
2005 out:
2006 kfree(req);
2007 kfree(resp);
2008 return ret;
2009 }
2010
2011 int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
2012 u32 offset, u32 mem_type,
2013 u32 data_len, u8 *data)
2014 {
2015 struct wlfw_athdiag_write_req_msg_v01 *req;
2016 struct wlfw_athdiag_write_resp_msg_v01 *resp;
2017 struct qmi_txn txn;
2018 int ret = 0;
2019
2020 if (!plat_priv)
2021 return -ENODEV;
2022
2023 if (!data || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
2024 cnss_pr_err("Invalid parameters for athdiag write: data %pK, data_len %u\n",
2025 data, data_len);
2026 return -EINVAL;
2027 }
2028
2029 cnss_pr_dbg("athdiag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %pK\n",
2030 plat_priv->driver_state, offset, mem_type, data_len, data);
2031
2032 req = kzalloc(sizeof(*req), GFP_KERNEL);
2033 if (!req)
2034 return -ENOMEM;
2035
2036 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
2037 if (!resp) {
2038 kfree(req);
2039 return -ENOMEM;
2040 }
2041
2042 req->offset = offset;
2043 req->mem_type = mem_type;
2044 req->data_len = data_len;
2045 memcpy(req->data, data, data_len);
2046
2047 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2048 wlfw_athdiag_write_resp_msg_v01_ei, resp);
2049 if (ret < 0) {
2050 cnss_pr_err("Failed to initialize txn for athdiag write request, err: %d\n",
2051 ret);
2052 goto out;
2053 }
2054
2055 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2056 QMI_WLFW_ATHDIAG_WRITE_REQ_V01,
2057 WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN,
2058 wlfw_athdiag_write_req_msg_v01_ei, req);
2059 if (ret < 0) {
2060 qmi_txn_cancel(&txn);
2061 cnss_pr_err("Failed to send athdiag write request, err: %d\n",
2062 ret);
2063 goto out;
2064 }
2065
2066 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2067 if (ret < 0) {
2068 cnss_pr_err("Failed to wait for response of athdiag write request, err: %d\n",
2069 ret);
2070 goto out;
2071 }
2072
2073 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
2074 cnss_pr_err("Athdiag write request failed, result: %d, err: %d\n",
2075 resp->resp.result, resp->resp.error);
2076 ret = -resp->resp.result;
2077 goto out;
2078 }
2079
2080 kfree(req);
2081 kfree(resp);
2082 return 0;
2083
2084 out:
2085 kfree(req);
2086 kfree(resp);
2087 return ret;
2088 }
2089
2090 int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
2091 u8 fw_log_mode)
2092 {
2093 struct wlfw_ini_req_msg_v01 *req;
2094 struct wlfw_ini_resp_msg_v01 *resp;
2095 struct qmi_txn txn;
2096 int ret = 0;
2097
2098 if (!plat_priv)
2099 return -ENODEV;
2100
2101 cnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
2102 plat_priv->driver_state, fw_log_mode);
2103
2104 req = kzalloc(sizeof(*req), GFP_KERNEL);
2105 if (!req)
2106 return -ENOMEM;
2107
2108 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
2109 if (!resp) {
2110 kfree(req);
2111 return -ENOMEM;
2112 }
2113
2114 req->enablefwlog_valid = 1;
2115 req->enablefwlog = fw_log_mode;
2116
2117 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2118 wlfw_ini_resp_msg_v01_ei, resp);
2119 if (ret < 0) {
2120 cnss_pr_err("Failed to initialize txn for ini request, fw_log_mode: %d, err: %d\n",
2121 fw_log_mode, ret);
2122 goto out;
2123 }
2124
2125 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2126 QMI_WLFW_INI_REQ_V01,
2127 WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
2128 wlfw_ini_req_msg_v01_ei, req);
2129 if (ret < 0) {
2130 qmi_txn_cancel(&txn);
2131 cnss_pr_err("Failed to send ini request, fw_log_mode: %d, err: %d\n",
2132 fw_log_mode, ret);
2133 goto out;
2134 }
2135
2136 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2137 if (ret < 0) {
2138 cnss_pr_err("Failed to wait for response of ini request, fw_log_mode: %d, err: %d\n",
2139 fw_log_mode, ret);
2140 goto out;
2141 }
2142
2143 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
2144 cnss_pr_err("Ini request failed, fw_log_mode: %d, result: %d, err: %d\n",
2145 fw_log_mode, resp->resp.result, resp->resp.error);
2146 ret = -resp->resp.result;
2147 goto out;
2148 }
2149
2150 kfree(req);
2151 kfree(resp);
2152 return 0;
2153
2154 out:
2155 kfree(req);
2156 kfree(resp);
2157 return ret;
2158 }
2159
2160 int cnss_wlfw_send_pcie_gen_speed_sync(struct cnss_plat_data *plat_priv)
2161 {
2162 struct wlfw_pcie_gen_switch_req_msg_v01 req;
2163 struct wlfw_pcie_gen_switch_resp_msg_v01 resp = {0};
2164 struct qmi_txn txn;
2165 int ret = 0;
2166
2167 if (!plat_priv)
2168 return -ENODEV;
2169
2170 if (plat_priv->pcie_gen_speed == QMI_PCIE_GEN_SPEED_INVALID_V01 ||
2171 !plat_priv->fw_pcie_gen_switch) {
2172 cnss_pr_dbg("PCIE Gen speed not setup\n");
2173 return 0;
2174 }
2175
2176 cnss_pr_dbg("Sending PCIE Gen speed: %d state: 0x%lx\n",
2177 plat_priv->pcie_gen_speed, plat_priv->driver_state);
2178 req.pcie_speed = (enum wlfw_pcie_gen_speed_v01)
2179 plat_priv->pcie_gen_speed;
2180
2181 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2182 wlfw_pcie_gen_switch_resp_msg_v01_ei, &resp);
2183 if (ret < 0) {
2184 cnss_pr_err("Failed to initialize txn for PCIE speed switch err: %d\n",
2185 ret);
2186 goto out;
2187 }
2188
2189 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2190 QMI_WLFW_PCIE_GEN_SWITCH_REQ_V01,
2191 WLFW_PCIE_GEN_SWITCH_REQ_MSG_V01_MAX_MSG_LEN,
2192 wlfw_pcie_gen_switch_req_msg_v01_ei, &req);
2193 if (ret < 0) {
2194 qmi_txn_cancel(&txn);
2195 cnss_pr_err("Failed to send PCIE speed switch, err: %d\n", ret);
2196 goto out;
2197 }
2198
2199 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2200 if (ret < 0) {
2201 cnss_pr_err("Failed to wait for PCIE Gen switch resp, err: %d\n",
2202 ret);
2203 goto out;
2204 }
2205
2206 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
2207 cnss_pr_err("PCIE Gen Switch req failed, Speed: %d, result: %d, err: %d\n",
2208 plat_priv->pcie_gen_speed, resp.resp.result,
2209 resp.resp.error);
2210 ret = -resp.resp.result;
2211 }
2212 out:
2213 /* Reset PCIE Gen speed after one time use */
2214 plat_priv->pcie_gen_speed = QMI_PCIE_GEN_SPEED_INVALID_V01;
2215 return ret;
2216 }
2217
2218 int cnss_wlfw_antenna_switch_send_sync(struct cnss_plat_data *plat_priv)
2219 {
2220 struct wlfw_antenna_switch_req_msg_v01 *req;
2221 struct wlfw_antenna_switch_resp_msg_v01 *resp;
2222 struct qmi_txn txn;
2223 int ret = 0;
2224
2225 if (!plat_priv)
2226 return -ENODEV;
2227
2228 cnss_pr_dbg("Sending antenna switch sync request, state: 0x%lx\n",
2229 plat_priv->driver_state);
2230
2231 req = kzalloc(sizeof(*req), GFP_KERNEL);
2232 if (!req)
2233 return -ENOMEM;
2234
2235 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
2236 if (!resp) {
2237 kfree(req);
2238 return -ENOMEM;
2239 }
2240
2241 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2242 wlfw_antenna_switch_resp_msg_v01_ei, resp);
2243 if (ret < 0) {
2244 cnss_pr_err("Failed to initialize txn for antenna switch request, err: %d\n",
2245 ret);
2246 goto out;
2247 }
2248
2249 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2250 QMI_WLFW_ANTENNA_SWITCH_REQ_V01,
2251 WLFW_ANTENNA_SWITCH_REQ_MSG_V01_MAX_MSG_LEN,
2252 wlfw_antenna_switch_req_msg_v01_ei, req);
2253 if (ret < 0) {
2254 qmi_txn_cancel(&txn);
2255 cnss_pr_err("Failed to send antenna switch request, err: %d\n",
2256 ret);
2257 goto out;
2258 }
2259
2260 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2261 if (ret < 0) {
2262 cnss_pr_err("Failed to wait for response of antenna switch request, err: %d\n",
2263 ret);
2264 goto out;
2265 }
2266
2267 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
2268 cnss_pr_dbg("Antenna switch request failed, result: %d, err: %d\n",
2269 resp->resp.result, resp->resp.error);
2270 ret = -resp->resp.result;
2271 goto out;
2272 }
2273
2274 if (resp->antenna_valid)
2275 plat_priv->antenna = resp->antenna;
2276
2277 cnss_pr_dbg("Antenna valid: %u, antenna 0x%llx\n",
2278 resp->antenna_valid, resp->antenna);
2279
2280 kfree(req);
2281 kfree(resp);
2282 return 0;
2283
2284 out:
2285 kfree(req);
2286 kfree(resp);
2287 return ret;
2288 }
2289
2290 int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv)
2291 {
2292 struct wlfw_antenna_grant_req_msg_v01 *req;
2293 struct wlfw_antenna_grant_resp_msg_v01 *resp;
2294 struct qmi_txn txn;
2295 int ret = 0;
2296
2297 if (!plat_priv)
2298 return -ENODEV;
2299
2300 cnss_pr_dbg("Sending antenna grant sync request, state: 0x%lx, grant 0x%llx\n",
2301 plat_priv->driver_state, plat_priv->grant);
2302
2303 req = kzalloc(sizeof(*req), GFP_KERNEL);
2304 if (!req)
2305 return -ENOMEM;
2306
2307 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
2308 if (!resp) {
2309 kfree(req);
2310 return -ENOMEM;
2311 }
2312
2313 req->grant_valid = 1;
2314 req->grant = plat_priv->grant;
2315
2316 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2317 wlfw_antenna_grant_resp_msg_v01_ei, resp);
2318 if (ret < 0) {
2319 cnss_pr_err("Failed to initialize txn for antenna grant request, err: %d\n",
2320 ret);
2321 goto out;
2322 }
2323
2324 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2325 QMI_WLFW_ANTENNA_GRANT_REQ_V01,
2326 WLFW_ANTENNA_GRANT_REQ_MSG_V01_MAX_MSG_LEN,
2327 wlfw_antenna_grant_req_msg_v01_ei, req);
2328 if (ret < 0) {
2329 qmi_txn_cancel(&txn);
2330 cnss_pr_err("Failed to send antenna grant request, err: %d\n",
2331 ret);
2332 goto out;
2333 }
2334
2335 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2336 if (ret < 0) {
2337 cnss_pr_err("Failed to wait for response of antenna grant request, err: %d\n",
2338 ret);
2339 goto out;
2340 }
2341
2342 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
2343 cnss_pr_err("Antenna grant request failed, result: %d, err: %d\n",
2344 resp->resp.result, resp->resp.error);
2345 ret = -resp->resp.result;
2346 goto out;
2347 }
2348
2349 kfree(req);
2350 kfree(resp);
2351 return 0;
2352
2353 out:
2354 kfree(req);
2355 kfree(resp);
2356 return ret;
2357 }
2358
2359 int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
2360 {
2361 struct wlfw_qdss_trace_mem_info_req_msg_v01 *req;
2362 struct wlfw_qdss_trace_mem_info_resp_msg_v01 *resp;
2363 struct qmi_txn txn;
2364 struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
2365 int ret = 0;
2366 int i;
2367
2368 cnss_pr_dbg("Sending QDSS trace mem info, state: 0x%lx\n",
2369 plat_priv->driver_state);
2370
2371 req = kzalloc(sizeof(*req), GFP_KERNEL);
2372 if (!req)
2373 return -ENOMEM;
2374
2375 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
2376 if (!resp) {
2377 kfree(req);
2378 return -ENOMEM;
2379 }
2380
2381 if (plat_priv->qdss_mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
2382 cnss_pr_err("Invalid seg len %u\n", plat_priv->qdss_mem_seg_len);
2383 ret = -EINVAL;
2384 goto out;
2385 }
2386
2387 req->mem_seg_len = plat_priv->qdss_mem_seg_len;
2388 for (i = 0; i < req->mem_seg_len; i++) {
2389 cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
2390 qdss_mem[i].va, &qdss_mem[i].pa,
2391 qdss_mem[i].size, qdss_mem[i].type);
2392
2393 req->mem_seg[i].addr = qdss_mem[i].pa;
2394 req->mem_seg[i].size = qdss_mem[i].size;
2395 req->mem_seg[i].type = qdss_mem[i].type;
2396 }
2397
2398 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2399 wlfw_qdss_trace_mem_info_resp_msg_v01_ei, resp);
2400 if (ret < 0) {
2401 cnss_pr_err("Fail to initialize txn for QDSS trace mem request: err %d\n",
2402 ret);
2403 goto out;
2404 }
2405
2406 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2407 QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01,
2408 WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN,
2409 wlfw_qdss_trace_mem_info_req_msg_v01_ei, req);
2410 if (ret < 0) {
2411 qmi_txn_cancel(&txn);
2412 cnss_pr_err("Fail to send QDSS trace mem info request: err %d\n",
2413 ret);
2414 goto out;
2415 }
2416
2417 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2418 if (ret < 0) {
2419 cnss_pr_err("Fail to wait for response of QDSS trace mem info request, err %d\n",
2420 ret);
2421 goto out;
2422 }
2423
2424 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
2425 cnss_pr_err("QDSS trace mem info request failed, result: %d, err: %d\n",
2426 resp->resp.result, resp->resp.error);
2427 ret = -resp->resp.result;
2428 goto out;
2429 }
2430
2431 kfree(req);
2432 kfree(resp);
2433 return 0;
2434
2435 out:
2436 kfree(req);
2437 kfree(resp);
2438 return ret;
2439 }
2440
2441 int cnss_wlfw_send_host_wfc_call_status(struct cnss_plat_data *plat_priv,
2442 struct cnss_wfc_cfg cfg)
2443 {
2444 struct wlfw_wfc_call_status_req_msg_v01 *req;
2445 struct wlfw_wfc_call_status_resp_msg_v01 *resp;
2446 struct qmi_txn txn;
2447 int ret = 0;
2448
2449 if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
2450 cnss_pr_err("Drop host WFC indication as FW not initialized\n");
2451 return -EINVAL;
2452 }
2453 req = kzalloc(sizeof(*req), GFP_KERNEL);
2454 if (!req)
2455 return -ENOMEM;
2456
2457 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
2458 if (!resp) {
2459 kfree(req);
2460 return -ENOMEM;
2461 }
2462
2463 req->wfc_call_active_valid = 1;
2464 req->wfc_call_active = cfg.mode;
2465
2466 cnss_pr_dbg("CNSS->FW: WFC_CALL_REQ: state: 0x%lx\n",
2467 plat_priv->driver_state);
2468
2469 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2470 wlfw_wfc_call_status_resp_msg_v01_ei, resp);
2471 if (ret < 0) {
2472 cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Txn Init: Err %d\n",
2473 ret);
2474 goto out;
2475 }
2476
2477 cnss_pr_dbg("Send WFC Mode: %d\n", cfg.mode);
2478 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2479 QMI_WLFW_WFC_CALL_STATUS_REQ_V01,
2480 WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN,
2481 wlfw_wfc_call_status_req_msg_v01_ei, req);
2482 if (ret < 0) {
2483 qmi_txn_cancel(&txn);
2484 cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Send Err: %d\n",
2485 ret);
2486 goto out;
2487 }
2488
2489 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2490 if (ret < 0) {
2491 cnss_pr_err("FW->CNSS: WFC_CALL_RSP: QMI Wait Err: %d\n",
2492 ret);
2493 goto out;
2494 }
2495
2496 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
2497 cnss_pr_err("FW->CNSS: WFC_CALL_RSP: Result: %d Err: %d\n",
2498 resp->resp.result, resp->resp.error);
2499 ret = -EINVAL;
2500 goto out;
2501 }
2502 ret = 0;
2503 out:
2504 kfree(req);
2505 kfree(resp);
2506 return ret;
2508 }

2509 static int cnss_wlfw_wfc_call_status_send_sync
2510 (struct cnss_plat_data *plat_priv,
2511 const struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg)
2512 {
2513 struct wlfw_wfc_call_status_req_msg_v01 *req;
2514 struct wlfw_wfc_call_status_resp_msg_v01 *resp;
2515 struct qmi_txn txn;
2516 int ret = 0;
2517
2518 if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
2519 cnss_pr_err("Drop IMS WFC indication as FW not initialized\n");
2520 return -EINVAL;
2521 }
2522 req = kzalloc(sizeof(*req), GFP_KERNEL);
2523 if (!req)
2524 return -ENOMEM;
2525
2526 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
2527 if (!resp) {
2528 kfree(req);
2529 return -ENOMEM;
2530 }
2531
2532 /*
2533 * The WFC call r1 design treats CNSS as a pass-through that forwards an
2534 * opaque hex buffer. The r2 update expands the QMI structure and, as a
2535 * side effect, the QMI-decoded structures contain padding, so the buffer
2536 * design can no longer be used. For backward compatibility with the r1
2537 * design, copy only the wfc_call_active value into the hex buffer.
2538 */
2539 req->wfc_call_status_len = sizeof(ind_msg->wfc_call_active);
2540 req->wfc_call_status[0] = ind_msg->wfc_call_active;
2541
2542 /* wfc_call_active is mandatory in IMS indication */
2543 req->wfc_call_active_valid = 1;
2544 req->wfc_call_active = ind_msg->wfc_call_active;
2545 req->all_wfc_calls_held_valid = ind_msg->all_wfc_calls_held_valid;
2546 req->all_wfc_calls_held = ind_msg->all_wfc_calls_held;
2547 req->is_wfc_emergency_valid = ind_msg->is_wfc_emergency_valid;
2548 req->is_wfc_emergency = ind_msg->is_wfc_emergency;
2549 req->twt_ims_start_valid = ind_msg->twt_ims_start_valid;
2550 req->twt_ims_start = ind_msg->twt_ims_start;
2551 req->twt_ims_int_valid = ind_msg->twt_ims_int_valid;
2552 req->twt_ims_int = ind_msg->twt_ims_int;
2553 req->media_quality_valid = ind_msg->media_quality_valid;
2554 req->media_quality =
2555 (enum wlfw_wfc_media_quality_v01)ind_msg->media_quality;
2556
2557 cnss_pr_dbg("CNSS->FW: WFC_CALL_REQ: state: 0x%lx\n",
2558 plat_priv->driver_state);
2559
2560 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2561 wlfw_wfc_call_status_resp_msg_v01_ei, resp);
2562 if (ret < 0) {
2563 cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Txn Init: Err %d\n",
2564 ret);
2565 goto out;
2566 }
2567
2568 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2569 QMI_WLFW_WFC_CALL_STATUS_REQ_V01,
2570 WLFW_WFC_CALL_STATUS_REQ_MSG_V01_MAX_MSG_LEN,
2571 wlfw_wfc_call_status_req_msg_v01_ei, req);
2572 if (ret < 0) {
2573 qmi_txn_cancel(&txn);
2574 cnss_pr_err("CNSS->FW: WFC_CALL_REQ: QMI Send Err: %d\n",
2575 ret);
2576 goto out;
2577 }
2578
2579 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2580 if (ret < 0) {
2581 cnss_pr_err("FW->CNSS: WFC_CALL_RSP: QMI Wait Err: %d\n",
2582 ret);
2583 goto out;
2584 }
2585
2586 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
2587 cnss_pr_err("FW->CNSS: WFC_CALL_RSP: Result: %d Err: %d\n",
2588 resp->resp.result, resp->resp.error);
2589 ret = -resp->resp.result;
2590 goto out;
2591 }
2592 ret = 0;
2593 out:
2594 kfree(req);
2595 kfree(resp);
2596 return ret;
2597 }
2598
2599 int cnss_wlfw_dynamic_feature_mask_send_sync(struct cnss_plat_data *plat_priv)
2600 {
2601 struct wlfw_dynamic_feature_mask_req_msg_v01 *req;
2602 struct wlfw_dynamic_feature_mask_resp_msg_v01 *resp;
2603 struct qmi_txn txn;
2604 int ret = 0;
2605
2606 cnss_pr_dbg("Sending dynamic feature mask 0x%llx, state: 0x%lx\n",
2607 plat_priv->dynamic_feature,
2608 plat_priv->driver_state);
2609
2610 req = kzalloc(sizeof(*req), GFP_KERNEL);
2611 if (!req)
2612 return -ENOMEM;
2613
2614 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
2615 if (!resp) {
2616 kfree(req);
2617 return -ENOMEM;
2618 }
2619
2620 req->mask_valid = 1;
2621 req->mask = plat_priv->dynamic_feature;
2622
2623 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2624 wlfw_dynamic_feature_mask_resp_msg_v01_ei, resp);
2625 if (ret < 0) {
2626 cnss_pr_err("Fail to initialize txn for dynamic feature mask request: err %d\n",
2627 ret);
2628 goto out;
2629 }
2630
2631 ret = qmi_send_request
2632 (&plat_priv->qmi_wlfw, NULL, &txn,
2633 QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01,
2634 WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN,
2635 wlfw_dynamic_feature_mask_req_msg_v01_ei, req);
2636 if (ret < 0) {
2637 qmi_txn_cancel(&txn);
2638 cnss_pr_err("Fail to send dynamic feature mask request: err %d\n",
2639 ret);
2640 goto out;
2641 }
2642
2643 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2644 if (ret < 0) {
2645 cnss_pr_err("Fail to wait for response of dynamic feature mask request, err %d\n",
2646 ret);
2647 goto out;
2648 }
2649
2650 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
2651 cnss_pr_err("Dynamic feature mask request failed, result: %d, err: %d\n",
2652 resp->resp.result, resp->resp.error);
2653 ret = -resp->resp.result;
2654 goto out;
2655 }
2656
2657 out:
2658 kfree(req);
2659 kfree(resp);
2660 return ret;
2661 }
2662
2663 int cnss_wlfw_get_info_send_sync(struct cnss_plat_data *plat_priv, int type,
2664 void *cmd, int cmd_len)
2665 {
2666 struct wlfw_get_info_req_msg_v01 *req;
2667 struct wlfw_get_info_resp_msg_v01 *resp;
2668 struct qmi_txn txn;
2669 int ret = 0;
2670
2671 cnss_pr_buf("Sending get info message, type: %d, cmd length: %d, state: 0x%lx\n",
2672 type, cmd_len, plat_priv->driver_state);
2673
2674 if (cmd_len > QMI_WLFW_MAX_DATA_SIZE_V01)
2675 return -EINVAL;
2676
2677 req = kzalloc(sizeof(*req), GFP_KERNEL);
2678 if (!req)
2679 return -ENOMEM;
2680
2681 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
2682 if (!resp) {
2683 kfree(req);
2684 return -ENOMEM;
2685 }
2686
2687 req->type = type;
2688 req->data_len = cmd_len;
2689 memcpy(req->data, cmd, req->data_len);
2690
2691 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2692 wlfw_get_info_resp_msg_v01_ei, resp);
2693 if (ret < 0) {
2694 cnss_pr_err("Failed to initialize txn for get info request, err: %d\n",
2695 ret);
2696 goto out;
2697 }
2698
2699 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2700 QMI_WLFW_GET_INFO_REQ_V01,
2701 WLFW_GET_INFO_REQ_MSG_V01_MAX_MSG_LEN,
2702 wlfw_get_info_req_msg_v01_ei, req);
2703 if (ret < 0) {
2704 qmi_txn_cancel(&txn);
2705 cnss_pr_err("Failed to send get info request, err: %d\n",
2706 ret);
2707 goto out;
2708 }
2709
2710 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2711 if (ret < 0) {
2712 cnss_pr_err("Failed to wait for response of get info request, err: %d\n",
2713 ret);
2714 goto out;
2715 }
2716
2717 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
2718 cnss_pr_err("Get info request failed, result: %d, err: %d\n",
2719 resp->resp.result, resp->resp.error);
2720 ret = -resp->resp.result;
2721 goto out;
2722 }
2723
2724 kfree(req);
2725 kfree(resp);
2726 return 0;
2727
2728 out:
2729 kfree(req);
2730 kfree(resp);
2731 return ret;
2732 }
2733
2734 unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
2735 {
2736 return QMI_WLFW_TIMEOUT_MS;
2737 }
2738
2739 static void cnss_wlfw_request_mem_ind_cb(struct qmi_handle *qmi_wlfw,
2740 struct sockaddr_qrtr *sq,
2741 struct qmi_txn *txn, const void *data)
2742 {
2743 struct cnss_plat_data *plat_priv =
2744 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
2745 const struct wlfw_request_mem_ind_msg_v01 *ind_msg = data;
2746 int i;
2747
2748 cnss_pr_dbg("Received QMI WLFW request memory indication\n");
2749
2750 if (!txn) {
2751 cnss_pr_err("Spurious indication\n");
2752 return;
2753 }
2754
2755 if (ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
2756 cnss_pr_err("Invalid seg len %u\n", ind_msg->mem_seg_len);
2757 return;
2758 }
2759
2760 plat_priv->fw_mem_seg_len = ind_msg->mem_seg_len;
2761 for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
2762 cnss_pr_dbg("FW requests for memory, size: 0x%x, type: %u\n",
2763 ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
2764 plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type;
2765 plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size;
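/* DDR-type segments that have not been allocated yet are requested as
 * physically contiguous memory.
 */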
2766 if (!plat_priv->fw_mem[i].va &&
2767 plat_priv->fw_mem[i].type == CNSS_MEM_TYPE_DDR)
2768 plat_priv->fw_mem[i].attrs |=
2769 DMA_ATTR_FORCE_CONTIGUOUS;
2770 if (plat_priv->fw_mem[i].type == CNSS_MEM_CAL_V01)
2771 plat_priv->cal_mem = &plat_priv->fw_mem[i];
2772 }
2773
2774 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
2775 0, NULL);
2776 }
2777
2778 static void cnss_wlfw_fw_mem_ready_ind_cb(struct qmi_handle *qmi_wlfw,
2779 struct sockaddr_qrtr *sq,
2780 struct qmi_txn *txn, const void *data)
2781 {
2782 struct cnss_plat_data *plat_priv =
2783 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
2784
2785 cnss_pr_dbg("Received QMI WLFW FW memory ready indication\n");
2786
2787 if (!txn) {
2788 cnss_pr_err("Spurious indication\n");
2789 return;
2790 }
2791
2792 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_READY,
2793 0, NULL);
2794 }
2795
2796 /**
2797 * cnss_wlfw_fw_ready_ind_cb: FW ready indication handler (Helium arch)
2798 *
2799 * This event is not required for HST/HSP, as FW calibration completion is
2800 * reported via QMI_WLFW_CAL_DONE_IND_V01.
2801 */
2802 static void cnss_wlfw_fw_ready_ind_cb(struct qmi_handle *qmi_wlfw,
2803 struct sockaddr_qrtr *sq,
2804 struct qmi_txn *txn, const void *data)
2805 {
2806 struct cnss_plat_data *plat_priv =
2807 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
2808 struct cnss_cal_info *cal_info;
2809
2810 if (!txn) {
2811 cnss_pr_err("Spurious indication\n");
2812 return;
2813 }
2814
2815 if (plat_priv->device_id == QCA6390_DEVICE_ID ||
2816 plat_priv->device_id == QCA6490_DEVICE_ID) {
2817 cnss_pr_dbg("Ignore FW Ready Indication for HST/HSP\n");
2818 return;
2819 }
2820
2821 cnss_pr_dbg("Received QMI WLFW FW ready indication.\n");
2822 cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
2823 if (!cal_info)
2824 return;
2825
2826 cal_info->cal_status = CNSS_CAL_DONE;
2827 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
2828 0, cal_info);
2829 }
2830
2831 static void cnss_wlfw_fw_init_done_ind_cb(struct qmi_handle *qmi_wlfw,
2832 struct sockaddr_qrtr *sq,
2833 struct qmi_txn *txn, const void *data)
2834 {
2835 struct cnss_plat_data *plat_priv =
2836 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
2837
2838 cnss_pr_dbg("Received QMI WLFW FW initialization done indication\n");
2839
2840 if (!txn) {
2841 cnss_pr_err("Spurious indication\n");
2842 return;
2843 }
2844
2845 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_READY, 0, NULL);
2846 }
2847
2848 static void cnss_wlfw_pin_result_ind_cb(struct qmi_handle *qmi_wlfw,
2849 struct sockaddr_qrtr *sq,
2850 struct qmi_txn *txn, const void *data)
2851 {
2852 struct cnss_plat_data *plat_priv =
2853 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
2854 const struct wlfw_pin_connect_result_ind_msg_v01 *ind_msg = data;
2855
2856 cnss_pr_dbg("Received QMI WLFW pin connect result indication\n");
2857
2858 if (!txn) {
2859 cnss_pr_err("Spurious indication\n");
2860 return;
2861 }
2862
2863 if (ind_msg->pwr_pin_result_valid)
2864 plat_priv->pin_result.fw_pwr_pin_result =
2865 ind_msg->pwr_pin_result;
2866 if (ind_msg->phy_io_pin_result_valid)
2867 plat_priv->pin_result.fw_phy_io_pin_result =
2868 ind_msg->phy_io_pin_result;
2869 if (ind_msg->rf_pin_result_valid)
2870 plat_priv->pin_result.fw_rf_pin_result = ind_msg->rf_pin_result;
2871
2872 cnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n",
2873 ind_msg->pwr_pin_result, ind_msg->phy_io_pin_result,
2874 ind_msg->rf_pin_result);
2875 }
2876
2877 int cnss_wlfw_cal_report_req_send_sync(struct cnss_plat_data *plat_priv,
2878 u32 cal_file_download_size)
2879 {
2880 struct wlfw_cal_report_req_msg_v01 req = {0};
2881 struct wlfw_cal_report_resp_msg_v01 resp = {0};
2882 struct qmi_txn txn;
2883 int ret = 0;
2884
2885 cnss_pr_dbg("Sending cal file report request. File size: %d, state: 0x%lx\n",
2886 cal_file_download_size, plat_priv->driver_state);
2887 req.cal_file_download_size_valid = 1;
2888 req.cal_file_download_size = cal_file_download_size;
2889
2890 ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
2891 wlfw_cal_report_resp_msg_v01_ei, &resp);
2892 if (ret < 0) {
2893 cnss_pr_err("Failed to initialize txn for Cal Report request, err: %d\n",
2894 ret);
2895 goto out;
2896 }
2897 ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
2898 QMI_WLFW_CAL_REPORT_REQ_V01,
2899 WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
2900 wlfw_cal_report_req_msg_v01_ei, &req);
2901 if (ret < 0) {
2902 qmi_txn_cancel(&txn);
2903 cnss_pr_err("Failed to send Cal Report request, err: %d\n",
2904 ret);
2905 goto out;
2906 }
2907 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
2908 if (ret < 0) {
2909 cnss_pr_err("Failed to wait for response of Cal Report request, err: %d\n",
2910 ret);
2911 goto out;
2912 }
2913 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
2914 cnss_pr_err("Cal Report request failed, result: %d, err: %d\n",
2915 resp.resp.result, resp.resp.error);
2916 ret = -resp.resp.result;
2917 goto out;
2918 }
2919 out:
2920 return ret;
2921 }
2922
2923 static void cnss_wlfw_cal_done_ind_cb(struct qmi_handle *qmi_wlfw,
2924 struct sockaddr_qrtr *sq,
2925 struct qmi_txn *txn, const void *data)
2926 {
2927 struct cnss_plat_data *plat_priv =
2928 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
2929 const struct wlfw_cal_done_ind_msg_v01 *ind = data;
2930 struct cnss_cal_info *cal_info;
2931
2932 cnss_pr_dbg("Received Cal done indication. File size: %d\n",
2933 ind->cal_file_upload_size);
2934 cnss_pr_info("Calibration took %d ms\n",
2935 jiffies_to_msecs(jiffies - plat_priv->cal_time));
2936 if (!txn) {
2937 cnss_pr_err("Spurious indication\n");
2938 return;
2939 }
2940 if (ind->cal_file_upload_size_valid)
2941 plat_priv->cal_file_size = ind->cal_file_upload_size;
2942 cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
2943 if (!cal_info)
2944 return;
2945
2946 cal_info->cal_status = CNSS_CAL_DONE;
2947 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
2948 0, cal_info);
2949 }
2950
2951 static void cnss_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_wlfw,
2952 struct sockaddr_qrtr *sq,
2953 struct qmi_txn *txn,
2954 const void *data)
2955 {
2956 struct cnss_plat_data *plat_priv =
2957 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
2958 const struct wlfw_qdss_trace_req_mem_ind_msg_v01 *ind_msg = data;
2959 int i;
2960
2961 cnss_pr_dbg("Received QMI WLFW QDSS trace request mem indication\n");
2962
2963 if (!txn) {
2964 cnss_pr_err("Spurious indication\n");
2965 return;
2966 }
2967
2968 if (plat_priv->qdss_mem_seg_len) {
2969 cnss_pr_err("Ignore double allocation for QDSS trace, current len %u\n",
2970 plat_priv->qdss_mem_seg_len);
2971 return;
2972 }
2973
2974 if (ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
2975 cnss_pr_err("Invalid seg len %u\n", ind_msg->mem_seg_len);
2976 return;
2977 }
2978
2979 plat_priv->qdss_mem_seg_len = ind_msg->mem_seg_len;
2980 for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
2981 cnss_pr_dbg("QDSS requests for memory, size: 0x%x, type: %u\n",
2982 ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
2983 plat_priv->qdss_mem[i].type = ind_msg->mem_seg[i].type;
2984 plat_priv->qdss_mem[i].size = ind_msg->mem_seg[i].size;
2985 }
2986
2987 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
2988 0, NULL);
2989 }
2990
2991 /**
2992 * cnss_wlfw_fw_mem_file_save_ind_cb: Save given FW mem to filesystem
2993 *
2994 * The QDSS_TRACE_SAVE_IND feature is overloaded to provide any host-allocated
2995 * FW memory segment for dumping to the file system. Only one memory type can
2996 * be saved per indication, and it is reported in mem_seg index 0.
2997 *
2998 * Return: None
2999 */
3000 static void cnss_wlfw_fw_mem_file_save_ind_cb(struct qmi_handle *qmi_wlfw,
3001 struct sockaddr_qrtr *sq,
3002 struct qmi_txn *txn,
3003 const void *data)
3004 {
3005 struct cnss_plat_data *plat_priv =
3006 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
3007 const struct wlfw_qdss_trace_save_ind_msg_v01 *ind_msg = data;
3008 struct cnss_qmi_event_fw_mem_file_save_data *event_data;
3009 int i = 0;
3010
3011 if (!txn || !data) {
3012 cnss_pr_err("Spurious indication\n");
3013 return;
3014 }
3015 cnss_pr_dbg_buf("QMI fw_mem_file_save: source: %d mem_seg: %d type: %u len: %u\n",
3016 ind_msg->source, ind_msg->mem_seg_valid,
3017 ind_msg->mem_seg[0].type, ind_msg->mem_seg_len);
3018
3019 event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
3020 if (!event_data)
3021 return;
3022
3023 event_data->mem_type = ind_msg->mem_seg[0].type;
3024 event_data->mem_seg_len = ind_msg->mem_seg_len;
3025 event_data->total_size = ind_msg->total_size;
3026
3027 if (ind_msg->mem_seg_valid) {
3028 if (ind_msg->mem_seg_len > QMI_WLFW_MAX_STR_LEN_V01) {
3029 cnss_pr_err("Invalid seg len indication\n");
3030 goto free_event_data;
3031 }
3032 for (i = 0; i < ind_msg->mem_seg_len; i++) {
3033 event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr;
3034 event_data->mem_seg[i].size = ind_msg->mem_seg[i].size;
3035 if (event_data->mem_type != ind_msg->mem_seg[i].type) {
3036 cnss_pr_err("FW Mem file save ind cannot have multiple mem types\n");
3037 goto free_event_data;
3038 }
3039 cnss_pr_dbg_buf("seg-%d: addr 0x%llx size 0x%x\n",
3040 i, ind_msg->mem_seg[i].addr,
3041 ind_msg->mem_seg[i].size);
3042 }
3043 }
3044
3045 if (ind_msg->file_name_valid)
3046 strlcpy(event_data->file_name, ind_msg->file_name,
3047 QMI_WLFW_MAX_STR_LEN_V01 + 1);
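/* Default file names when FW does not provide one: source 1 maps to the
 * WCSS ETB trace, otherwise the name depends on whether the segment is
 * QDSS trace memory or a generic FW memory dump.
 */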
3048 if (ind_msg->source == 1) {
3049 if (!ind_msg->file_name_valid)
3050 strlcpy(event_data->file_name, "qdss_trace_wcss_etb",
3051 QMI_WLFW_MAX_STR_LEN_V01 + 1);
3052 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_DATA,
3053 0, event_data);
3054 } else {
3055 if (event_data->mem_type == QMI_WLFW_MEM_QDSS_V01) {
3056 if (!ind_msg->file_name_valid)
3057 strlcpy(event_data->file_name, "qdss_trace_ddr",
3058 QMI_WLFW_MAX_STR_LEN_V01 + 1);
3059 } else {
3060 if (!ind_msg->file_name_valid)
3061 strlcpy(event_data->file_name, "fw_mem_dump",
3062 QMI_WLFW_MAX_STR_LEN_V01 + 1);
3063 }
3064
3065 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_MEM_FILE_SAVE,
3066 0, event_data);
3067 }
3068
3069 return;
3070
3071 free_event_data:
3072 kfree(event_data);
3073 }
3074
3075 static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
3076 struct sockaddr_qrtr *sq,
3077 struct qmi_txn *txn,
3078 const void *data)
3079 {
3080 struct cnss_plat_data *plat_priv =
3081 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
3082
3083 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
3084 0, NULL);
3085 }
3086
3087 static void cnss_wlfw_respond_get_info_ind_cb(struct qmi_handle *qmi_wlfw,
3088 struct sockaddr_qrtr *sq,
3089 struct qmi_txn *txn,
3090 const void *data)
3091 {
3092 struct cnss_plat_data *plat_priv =
3093 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
3094 const struct wlfw_respond_get_info_ind_msg_v01 *ind_msg = data;
3095
3096 cnss_pr_buf("Received QMI WLFW respond get info indication\n");
3097
3098 if (!txn) {
3099 cnss_pr_err("Spurious indication\n");
3100 return;
3101 }
3102
3103 cnss_pr_buf("Extract message with event length: %d, type: %d, is last: %d, seq no: %d\n",
3104 ind_msg->data_len, ind_msg->type,
3105 ind_msg->is_last, ind_msg->seq_no);
3106
3107 if (plat_priv->get_info_cb_ctx && plat_priv->get_info_cb)
3108 plat_priv->get_info_cb(plat_priv->get_info_cb_ctx,
3109 (void *)ind_msg->data,
3110 ind_msg->data_len);
3111 }
3112
3113 static void cnss_wlfw_driver_async_data_ind_cb(struct qmi_handle *qmi_wlfw,
3114 struct sockaddr_qrtr *sq,
3115 struct qmi_txn *txn,
3116 const void *data)
3117 {
3118 struct cnss_plat_data *plat_priv =
3119 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
3120 const struct wlfw_driver_async_data_ind_msg_v01 *ind_msg = data;
3121
3122 cnss_pr_buf("Received QMI WLFW driver async data indication\n");
3123
3124 if (!txn) {
3125 cnss_pr_err("Spurious indication\n");
3126 return;
3127 }
3128
3129 cnss_pr_buf("Extract message with event length: %d, type: %d\n",
3130 ind_msg->data_len, ind_msg->type);
3131
3132 if (plat_priv->get_driver_async_data_ctx &&
3133 plat_priv->get_driver_async_data_cb)
3134 plat_priv->get_driver_async_data_cb(
3135 plat_priv->get_driver_async_data_ctx, ind_msg->type,
3136 (void *)ind_msg->data, ind_msg->data_len);
3137 }
3138
3140 static int cnss_ims_wfc_call_twt_cfg_send_sync
3141 (struct cnss_plat_data *plat_priv,
3142 const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg)
3143 {
3144 struct ims_private_service_wfc_call_twt_config_req_msg_v01 *req;
3145 struct ims_private_service_wfc_call_twt_config_rsp_msg_v01 *resp;
3146 struct qmi_txn txn;
3147 int ret = 0;
3148
3149 if (!test_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state)) {
3150 cnss_pr_err("Drop FW WFC indication as IMS QMI not connected\n");
3151 return -EINVAL;
3152 }
3153
3154 req = kzalloc(sizeof(*req), GFP_KERNEL);
3155 if (!req)
3156 return -ENOMEM;
3157
3158 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
3159 if (!resp) {
3160 kfree(req);
3161 return -ENOMEM;
3162 }
3163
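/* Mirror the TWT parameters from the FW indication into the IMS private
 * service request.
 */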
3164 req->twt_sta_start_valid = ind_msg->twt_sta_start_valid;
3165 req->twt_sta_start = ind_msg->twt_sta_start;
3166 req->twt_sta_int_valid = ind_msg->twt_sta_int_valid;
3167 req->twt_sta_int = ind_msg->twt_sta_int;
3168 req->twt_sta_upo_valid = ind_msg->twt_sta_upo_valid;
3169 req->twt_sta_upo = ind_msg->twt_sta_upo;
3170 req->twt_sta_sp_valid = ind_msg->twt_sta_sp_valid;
3171 req->twt_sta_sp = ind_msg->twt_sta_sp;
3172 req->twt_sta_dl_valid = ind_msg->twt_sta_dl_valid;
3173 req->twt_sta_dl = ind_msg->twt_sta_dl;
3174 req->twt_sta_config_changed_valid =
3175 ind_msg->twt_sta_config_changed_valid;
3176 req->twt_sta_config_changed = ind_msg->twt_sta_config_changed;
3177
3178 cnss_pr_dbg("CNSS->IMS: TWT_CFG_REQ: state: 0x%lx\n",
3179 plat_priv->driver_state);
3180
3181 ret =
3182 qmi_txn_init(&plat_priv->ims_qmi, &txn,
3183 ims_private_service_wfc_call_twt_config_rsp_msg_v01_ei,
3184 resp);
3185 if (ret < 0) {
3186 cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Txn Init Err: %d\n",
3187 ret);
3188 goto out;
3189 }
3190
3191 ret =
3192 qmi_send_request(&plat_priv->ims_qmi, NULL, &txn,
3193 QMI_IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_V01,
3194 IMS_PRIVATE_SERVICE_WFC_CALL_TWT_CONFIG_REQ_MSG_V01_MAX_MSG_LEN,
3195 ims_private_service_wfc_call_twt_config_req_msg_v01_ei, req);
3196 if (ret < 0) {
3197 qmi_txn_cancel(&txn);
3198 cnss_pr_err("CNSS->IMS: TWT_CFG_REQ: QMI Send Err: %d\n", ret);
3199 goto out;
3200 }
3201
3202 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
3203 if (ret < 0) {
3204 cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: QMI Wait Err: %d\n", ret);
3205 goto out;
3206 }
3207
3208 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
3209 cnss_pr_err("IMS->CNSS: TWT_CFG_RSP: Result: %d Err: %d\n",
3210 resp->resp.result, resp->resp.error);
3211 ret = -resp->resp.result;
3212 goto out;
3213 }
3214 ret = 0;
3215 out:
3216 kfree(req);
3217 kfree(resp);
3218 return ret;
3219 }
3220
3221 int cnss_process_twt_cfg_ind_event(struct cnss_plat_data *plat_priv,
3222 void *data)
3223 {
3224 int ret;
3225 struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data;
3226
3227 ret = cnss_ims_wfc_call_twt_cfg_send_sync(plat_priv, ind_msg);
3228 kfree(data);
3229 return ret;
3230 }
3231
3232 static void cnss_wlfw_process_twt_cfg_ind(struct qmi_handle *qmi_wlfw,
3233 struct sockaddr_qrtr *sq,
3234 struct qmi_txn *txn,
3235 const void *data)
3236 {
3237 struct cnss_plat_data *plat_priv =
3238 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
3239 const struct wlfw_wfc_call_twt_config_ind_msg_v01 *ind_msg = data;
3240 struct wlfw_wfc_call_twt_config_ind_msg_v01 *event_data;
3241
3242 if (!txn) {
3243 cnss_pr_err("FW->CNSS: TWT_CFG_IND: Spurious indication\n");
3244 return;
3245 }
3246
3247 if (!ind_msg) {
3248 cnss_pr_err("FW->CNSS: TWT_CFG_IND: Invalid indication\n");
3249 return;
3250 }
3251 cnss_pr_dbg("FW->CNSS: TWT_CFG_IND: %x %llx, %x %x, %x %x, %x %x, %x %x, %x %x\n",
3252 ind_msg->twt_sta_start_valid, ind_msg->twt_sta_start,
3253 ind_msg->twt_sta_int_valid, ind_msg->twt_sta_int,
3254 ind_msg->twt_sta_upo_valid, ind_msg->twt_sta_upo,
3255 ind_msg->twt_sta_sp_valid, ind_msg->twt_sta_sp,
3256 ind_msg->twt_sta_dl_valid, ind_msg->twt_sta_dl,
3257 ind_msg->twt_sta_config_changed_valid,
3258 ind_msg->twt_sta_config_changed);
3259
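/* Duplicate the indication so it can be handled later in the driver event
 * context; the copy is freed in cnss_process_twt_cfg_ind_event().
 */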
3260 event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
3261 if (!event_data)
3262 return;
3263 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_WLFW_TWT_CFG_IND, 0,
3264 event_data);
3265 }
3266
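/* WLFW indication handlers: each entry maps a QMI indication message ID to
 * its decoder table and callback.
 */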
3267 static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
3268 {
3269 .type = QMI_INDICATION,
3270 .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
3271 .ei = wlfw_request_mem_ind_msg_v01_ei,
3272 .decoded_size = sizeof(struct wlfw_request_mem_ind_msg_v01),
3273 .fn = cnss_wlfw_request_mem_ind_cb
3274 },
3275 {
3276 .type = QMI_INDICATION,
3277 .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
3278 .ei = wlfw_fw_mem_ready_ind_msg_v01_ei,
3279 .decoded_size = sizeof(struct wlfw_fw_mem_ready_ind_msg_v01),
3280 .fn = cnss_wlfw_fw_mem_ready_ind_cb
3281 },
3282 {
3283 .type = QMI_INDICATION,
3284 .msg_id = QMI_WLFW_FW_READY_IND_V01,
3285 .ei = wlfw_fw_ready_ind_msg_v01_ei,
3286 .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
3287 .fn = cnss_wlfw_fw_ready_ind_cb
3288 },
3289 {
3290 .type = QMI_INDICATION,
3291 .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
3292 .ei = wlfw_fw_init_done_ind_msg_v01_ei,
3293 .decoded_size = sizeof(struct wlfw_fw_init_done_ind_msg_v01),
3294 .fn = cnss_wlfw_fw_init_done_ind_cb
3295 },
3296 {
3297 .type = QMI_INDICATION,
3298 .msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01,
3299 .ei = wlfw_pin_connect_result_ind_msg_v01_ei,
3300 .decoded_size =
3301 sizeof(struct wlfw_pin_connect_result_ind_msg_v01),
3302 .fn = cnss_wlfw_pin_result_ind_cb
3303 },
3304 {
3305 .type = QMI_INDICATION,
3306 .msg_id = QMI_WLFW_CAL_DONE_IND_V01,
3307 .ei = wlfw_cal_done_ind_msg_v01_ei,
3308 .decoded_size = sizeof(struct wlfw_cal_done_ind_msg_v01),
3309 .fn = cnss_wlfw_cal_done_ind_cb
3310 },
3311 {
3312 .type = QMI_INDICATION,
3313 .msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01,
3314 .ei = wlfw_qdss_trace_req_mem_ind_msg_v01_ei,
3315 .decoded_size =
3316 sizeof(struct wlfw_qdss_trace_req_mem_ind_msg_v01),
3317 .fn = cnss_wlfw_qdss_trace_req_mem_ind_cb
3318 },
3319 {
3320 .type = QMI_INDICATION,
3321 .msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01,
3322 .ei = wlfw_qdss_trace_save_ind_msg_v01_ei,
3323 .decoded_size =
3324 sizeof(struct wlfw_qdss_trace_save_ind_msg_v01),
3325 .fn = cnss_wlfw_fw_mem_file_save_ind_cb
3326 },
3327 {
3328 .type = QMI_INDICATION,
3329 .msg_id = QMI_WLFW_QDSS_TRACE_FREE_IND_V01,
3330 .ei = wlfw_qdss_trace_free_ind_msg_v01_ei,
3331 .decoded_size =
3332 sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
3333 .fn = cnss_wlfw_qdss_trace_free_ind_cb
3334 },
3335 {
3336 .type = QMI_INDICATION,
3337 .msg_id = QMI_WLFW_RESPOND_GET_INFO_IND_V01,
3338 .ei = wlfw_respond_get_info_ind_msg_v01_ei,
3339 .decoded_size =
3340 sizeof(struct wlfw_respond_get_info_ind_msg_v01),
3341 .fn = cnss_wlfw_respond_get_info_ind_cb
3342 },
3343 {
3344 .type = QMI_INDICATION,
3345 .msg_id = QMI_WLFW_WFC_CALL_TWT_CONFIG_IND_V01,
3346 .ei = wlfw_wfc_call_twt_config_ind_msg_v01_ei,
3347 .decoded_size =
3348 sizeof(struct wlfw_wfc_call_twt_config_ind_msg_v01),
3349 .fn = cnss_wlfw_process_twt_cfg_ind
3350 },
3351 {
3352 .type = QMI_INDICATION,
3353 .msg_id = QMI_WLFW_DRIVER_ASYNC_DATA_IND_V01,
3354 .ei = wlfw_driver_async_data_ind_msg_v01_ei,
3355 .decoded_size =
3356 sizeof(struct wlfw_driver_async_data_ind_msg_v01),
3357 .fn = cnss_wlfw_driver_async_data_ind_cb
3358 },
3359 {}
3360 };
3361
3362 static int cnss_wlfw_connect_to_server(struct cnss_plat_data *plat_priv,
3363 void *data)
3364 {
3365 struct cnss_qmi_event_server_arrive_data *event_data = data;
3366 struct qmi_handle *qmi_wlfw = &plat_priv->qmi_wlfw;
3367 struct sockaddr_qrtr sq = { 0 };
3368 int ret = 0;
3369
3370 if (!event_data)
3371 return -EINVAL;
3372
3373 sq.sq_family = AF_QIPCRTR;
3374 sq.sq_node = event_data->node;
3375 sq.sq_port = event_data->port;
3376
3377 ret = kernel_connect(qmi_wlfw->sock, (struct sockaddr *)&sq,
3378 sizeof(sq), 0);
3379 if (ret < 0) {
3380 cnss_pr_err("Failed to connect to QMI WLFW remote service port\n");
3381 goto out;
3382 }
3383
3384 set_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
3385
3386 cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n",
3387 plat_priv->driver_state);
3388
3389 kfree(data);
3390 return 0;
3391
3392 out:
3393 CNSS_QMI_ASSERT();
3394 kfree(data);
3395 return ret;
3396 }
3397
3398 int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv, void *data)
3399 {
3400 int ret = 0;
3401
3402 if (!plat_priv)
3403 return -ENODEV;
3404
3405 if (test_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state)) {
3406 cnss_pr_err("Unexpected WLFW server arrive\n");
3407 CNSS_ASSERT(0);
3408 return -EINVAL;
3409 }
3410
3411 cnss_ignore_qmi_failure(false);
3412
3413 ret = cnss_wlfw_connect_to_server(plat_priv, data);
3414 if (ret < 0)
3415 goto out;
3416
3417 ret = cnss_wlfw_ind_register_send_sync(plat_priv);
3418 if (ret < 0) {
3419 if (ret == -EALREADY)
3420 ret = 0;
3421 goto out;
3422 }
3423
3424 ret = cnss_wlfw_host_cap_send_sync(plat_priv);
3425 if (ret < 0)
3426 goto out;
3427
3428 return 0;
3429
3430 out:
3431 return ret;
3432 }
3433
3434 int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv)
3435 {
3436 int ret;
3437
3438 if (!plat_priv)
3439 return -ENODEV;
3440
3441 clear_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
3442
3443 cnss_pr_info("QMI WLFW service disconnected, state: 0x%lx\n",
3444 plat_priv->driver_state);
3445
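/* Release the current QMI handle and register a fresh one so the WLFW
 * service can be rediscovered when the server comes back.
 */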
3446 cnss_qmi_deinit(plat_priv);
3447
3448 clear_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state);
3449
3450 ret = cnss_qmi_init(plat_priv);
3451 if (ret < 0) {
3452 cnss_pr_err("QMI WLFW service registration failed, ret: %d\n", ret);
3453 CNSS_ASSERT(0);
3454 }
3455 return 0;
3456 }
3457
3458 static int wlfw_new_server(struct qmi_handle *qmi_wlfw,
3459 struct qmi_service *service)
3460 {
3461 struct cnss_plat_data *plat_priv =
3462 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
3463 struct cnss_qmi_event_server_arrive_data *event_data;
3464
3465 if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) {
3466 cnss_pr_info("WLFW server delete in progress, Ignore server arrive, state: 0x%lx\n",
3467 plat_priv->driver_state);
3468 return 0;
3469 }
3470
3471 cnss_pr_dbg("WLFW server arriving: node %u port %u\n",
3472 service->node, service->port);
3473
3474 event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
3475 if (!event_data)
3476 return -ENOMEM;
3477
3478 event_data->node = service->node;
3479 event_data->port = service->port;
3480
3481 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_ARRIVE,
3482 0, event_data);
3483
3484 return 0;
3485 }
3486
3487 static void wlfw_del_server(struct qmi_handle *qmi_wlfw,
3488 struct qmi_service *service)
3489 {
3490 struct cnss_plat_data *plat_priv =
3491 container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
3492
3493 if (plat_priv && test_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state)) {
3494 cnss_pr_info("WLFW server delete in progress, Ignore server delete, state: 0x%lx\n",
3495 plat_priv->driver_state);
3496 return;
3497 }
3498
3499 cnss_pr_dbg("WLFW server exiting\n");
3500
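/* The WLFW server is going away: ignore QMI failures from requests still
 * in flight and mark server deletion in progress so a concurrent server
 * arrive is ignored.
 */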
3501 if (plat_priv) {
3502 cnss_ignore_qmi_failure(true);
3503 set_bit(CNSS_QMI_DEL_SERVER, &plat_priv->driver_state);
3504 }
3505
3506 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_SERVER_EXIT,
3507 0, NULL);
3508 }
3509
3510 static struct qmi_ops qmi_wlfw_ops = {
3511 .new_server = wlfw_new_server,
3512 .del_server = wlfw_del_server,
3513 };
3514
3515 static int cnss_qmi_add_lookup(struct cnss_plat_data *plat_priv)
3516 {
3517 unsigned int id = WLFW_SERVICE_INS_ID_V01;
3518
3519 /* To support the dual WLAN card attach case, each device
3520 * needs its own QMI service instance ID.
3521 */
3522 if (cnss_is_dual_wlan_enabled() && plat_priv->qrtr_node_id != 0 &&
3523 plat_priv->wlfw_service_instance_id != 0)
3524 id = plat_priv->wlfw_service_instance_id;
3525
3526 return qmi_add_lookup(&plat_priv->qmi_wlfw, WLFW_SERVICE_ID_V01,
3527 WLFW_SERVICE_VERS_V01, id);
3528 }
3529
3530 int cnss_qmi_init(struct cnss_plat_data *plat_priv)
3531 {
3532 int ret = 0;
3533 cnss_get_qrtr_info(plat_priv);
3534
3535 ret = qmi_handle_init(&plat_priv->qmi_wlfw,
3536 QMI_WLFW_MAX_RECV_BUF_SIZE,
3537 &qmi_wlfw_ops, qmi_wlfw_msg_handlers);
3538 if (ret < 0) {
3539 cnss_pr_err("Failed to initialize WLFW QMI handle, err: %d\n",
3540 ret);
3541 goto out;
3542 }
3543
3544 ret = cnss_qmi_add_lookup(plat_priv);
3545 if (ret < 0)
3546 cnss_pr_err("Failed to add WLFW QMI lookup, err: %d\n", ret);
3547
3548 out:
3549 return ret;
3550 }
3551
3552 void cnss_qmi_deinit(struct cnss_plat_data *plat_priv)
3553 {
3554 qmi_handle_release(&plat_priv->qmi_wlfw);
3555 }
3556
3557 int cnss_qmi_get_dms_mac(struct cnss_plat_data *plat_priv)
3558 {
3559 struct dms_get_mac_address_req_msg_v01 req;
3560 struct dms_get_mac_address_resp_msg_v01 resp;
3561 struct qmi_txn txn;
3562 int ret = 0;
3563
3564 if (!test_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state)) {
3565 cnss_pr_err("DMS QMI connection not established\n");
3566 return -EINVAL;
3567 }
3568 cnss_pr_dbg("Requesting DMS MAC address\n");
3569
3570 memset(&resp, 0, sizeof(resp));
3571 ret = qmi_txn_init(&plat_priv->qmi_dms, &txn,
3572 dms_get_mac_address_resp_msg_v01_ei, &resp);
3573 if (ret < 0) {
3574 cnss_pr_err("Failed to initialize txn for dms, err: %d\n",
3575 ret);
3576 goto out;
3577 }
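/* Query the MAC address that the modem DMS service has provisioned for WLAN. */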
3578 req.device = DMS_DEVICE_MAC_WLAN_V01;
3579 ret = qmi_send_request(&plat_priv->qmi_dms, NULL, &txn,
3580 QMI_DMS_GET_MAC_ADDRESS_REQ_V01,
3581 DMS_GET_MAC_ADDRESS_REQ_MSG_V01_MAX_MSG_LEN,
3582 dms_get_mac_address_req_msg_v01_ei, &req);
3583 if (ret < 0) {
3584 qmi_txn_cancel(&txn);
3585 cnss_pr_err("Failed to send QMI_DMS_GET_MAC_ADDRESS_REQ_V01, err: %d\n",
3586 ret);
3587 goto out;
3588 }
3589 ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
3590 if (ret < 0) {
3591 cnss_pr_err("Failed to wait for QMI_DMS_GET_MAC_ADDRESS_RESP_V01, err: %d\n",
3592 ret);
3593 goto out;
3594 }
3595
3596 if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
3597 cnss_pr_err("QMI_DMS_GET_MAC_ADDRESS_REQ_V01 failed, result: %d, err: %d\n",
3598 resp.resp.result, resp.resp.error);
3599 ret = -resp.resp.result;
3600 goto out;
3601 }
3602 if (!resp.mac_address_valid ||
3603 resp.mac_address_len != QMI_WLFW_MAC_ADDR_SIZE_V01) {
3604 cnss_pr_err("Invalid MAC address received from DMS\n");
3605 plat_priv->dms.mac_valid = false;
3606 goto out;
3607 }
3608 plat_priv->dms.mac_valid = true;
3609 memcpy(plat_priv->dms.mac, resp.mac_address, QMI_WLFW_MAC_ADDR_SIZE_V01);
3610 cnss_pr_info("Received DMS MAC: [%pM]\n", plat_priv->dms.mac);
3611 out:
3612 return ret;
3613 }
3614
3615 static int cnss_dms_connect_to_server(struct cnss_plat_data *plat_priv,
3616 unsigned int node, unsigned int port)
3617 {
3618 struct qmi_handle *qmi_dms = &plat_priv->qmi_dms;
3619 struct sockaddr_qrtr sq = {0};
3620 int ret = 0;
3621
3622 sq.sq_family = AF_QIPCRTR;
3623 sq.sq_node = node;
3624 sq.sq_port = port;
3625
3626 ret = kernel_connect(qmi_dms->sock, (struct sockaddr *)&sq,
3627 sizeof(sq), 0);
3628 if (ret < 0) {
3629 cnss_pr_err("Failed to connect to QMI DMS remote service Node: %d Port: %d\n",
3630 node, port);
3631 goto out;
3632 }
3633
3634 set_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state);
3635 cnss_pr_info("QMI DMS service connected, state: 0x%lx\n",
3636 plat_priv->driver_state);
3637 out:
3638 return ret;
3639 }
3640
3641 static int dms_new_server(struct qmi_handle *qmi_dms,
3642 struct qmi_service *service)
3643 {
3644 struct cnss_plat_data *plat_priv =
3645 container_of(qmi_dms, struct cnss_plat_data, qmi_dms);
3646
3647 if (!service)
3648 return -EINVAL;
3649
3650 return cnss_dms_connect_to_server(plat_priv, service->node,
3651 service->port);
3652 }
3653
3654 static void cnss_dms_server_exit_work(struct work_struct *work)
3655 {
3656 int ret;
3657 struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
3658
3659 cnss_dms_deinit(plat_priv);
3660
3661 cnss_pr_info("QMI DMS Server Exit");
3662 clear_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state);
3663
3664 ret = cnss_dms_init(plat_priv);
3665 if (ret < 0)
3666 cnss_pr_err("QMI DMS service registraton failed, ret\n", ret);
3667 }
3668
3669 static DECLARE_WORK(cnss_dms_del_work, cnss_dms_server_exit_work);
3670
3671 static void dms_del_server(struct qmi_handle *qmi_dms,
3672 struct qmi_service *service)
3673 {
3674 struct cnss_plat_data *plat_priv =
3675 container_of(qmi_dms, struct cnss_plat_data, qmi_dms);
3676
3677 if (!plat_priv)
3678 return;
3679
3680 if (test_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state)) {
3681 cnss_pr_info("DMS server delete or cnss remove in progress, Ignore server delete: 0x%lx\n",
3682 plat_priv->driver_state);
3683 return;
3684 }
3685
3686 set_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state);
3687 clear_bit(CNSS_QMI_DMS_CONNECTED, &plat_priv->driver_state);
3688 cnss_pr_info("QMI DMS service disconnected, state: 0x%lx\n",
3689 plat_priv->driver_state);
3690 schedule_work(&cnss_dms_del_work);
3691 }
3692
3693 void cnss_cancel_dms_work(void)
3694 {
3695 cancel_work_sync(&cnss_dms_del_work);
3696 }
3697
3698 static struct qmi_ops qmi_dms_ops = {
3699 .new_server = dms_new_server,
3700 .del_server = dms_del_server,
3701 };
3702
3703 int cnss_dms_init(struct cnss_plat_data *plat_priv)
3704 {
3705 int ret = 0;
3706
3707 ret = qmi_handle_init(&plat_priv->qmi_dms, DMS_QMI_MAX_MSG_LEN,
3708 &qmi_dms_ops, NULL);
3709 if (ret < 0) {
3710 cnss_pr_err("Failed to initialize DMS handle, err: %d\n", ret);
3711 goto out;
3712 }
3713
3714 ret = qmi_add_lookup(&plat_priv->qmi_dms, DMS_SERVICE_ID_V01,
3715 DMS_SERVICE_VERS_V01, 0);
3716 if (ret < 0)
3717 cnss_pr_err("Failed to add DMS lookup, err: %d\n", ret);
3718 out:
3719 return ret;
3720 }
3721
3722 void cnss_dms_deinit(struct cnss_plat_data *plat_priv)
3723 {
3724 set_bit(CNSS_DMS_DEL_SERVER, &plat_priv->driver_state);
3725 qmi_handle_release(&plat_priv->qmi_dms);
3726 }
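
/*
 * Teardown sketch (illustrative): dms_del_server() schedules
 * cnss_dms_del_work to re-register the DMS client after a server exit, so a
 * removal path should cancel that work before releasing the handle; setting
 * CNSS_DMS_DEL_SERVER in cnss_dms_deinit() then keeps any late del_server
 * callback from rescheduling it. The intended ordering, shown as a sketch:
 *
 *	cnss_cancel_dms_work();
 *	cnss_dms_deinit(plat_priv);
 */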
3727
3728 int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv)
3729 {
3730 int ret;
3731 struct coex_antenna_switch_to_wlan_req_msg_v01 *req;
3732 struct coex_antenna_switch_to_wlan_resp_msg_v01 *resp;
3733 struct qmi_txn txn;
3734
3735 if (!plat_priv)
3736 return -ENODEV;
3737
3738 cnss_pr_dbg("Sending coex antenna switch_to_wlan\n");
3739
3740 req = kzalloc(sizeof(*req), GFP_KERNEL);
3741 if (!req)
3742 return -ENOMEM;
3743
3744 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
3745 if (!resp) {
3746 kfree(req);
3747 return -ENOMEM;
3748 }
3749
3750 req->antenna = plat_priv->antenna;
3751
3752 ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
3753 coex_antenna_switch_to_wlan_resp_msg_v01_ei, resp);
3754 if (ret < 0) {
3755 cnss_pr_err("Fail to init txn for coex antenna switch_to_wlan resp %d\n",
3756 ret);
3757 goto out;
3758 }
3759
3760 ret = qmi_send_request
3761 (&plat_priv->coex_qmi, NULL, &txn,
3762 QMI_COEX_SWITCH_ANTENNA_TO_WLAN_REQ_V01,
3763 COEX_ANTENNA_SWITCH_TO_WLAN_REQ_MSG_V01_MAX_MSG_LEN,
3764 coex_antenna_switch_to_wlan_req_msg_v01_ei, req);
3765 if (ret < 0) {
3766 qmi_txn_cancel(&txn);
3767 cnss_pr_err("Fail to send coex antenna switch_to_wlan req %d\n",
3768 ret);
3769 goto out;
3770 }
3771
3772 ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
3773 if (ret < 0) {
3774 cnss_pr_err("Coex antenna switch_to_wlan resp wait failed with ret %d\n",
3775 ret);
3776 goto out;
3777 } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
3778 cnss_pr_err("Coex antenna switch_to_wlan request rejected, result:%d error:%d\n",
3779 resp->resp.result, resp->resp.error);
3780 ret = -resp->resp.result;
3781 goto out;
3782 }
3783
3784 if (resp->grant_valid)
3785 plat_priv->grant = resp->grant;
3786
3787 cnss_pr_dbg("Coex antenna grant: 0x%llx\n", resp->grant);
3788
3789 kfree(resp);
3790 kfree(req);
3791 return 0;
3792
3793 out:
3794 kfree(resp);
3795 kfree(req);
3796 return ret;
3797 }
3798
3799 int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
3800 {
3801 int ret;
3802 struct coex_antenna_switch_to_mdm_req_msg_v01 *req;
3803 struct coex_antenna_switch_to_mdm_resp_msg_v01 *resp;
3804 struct qmi_txn txn;
3805
3806 if (!plat_priv)
3807 return -ENODEV;
3808
3809 cnss_pr_dbg("Sending coex antenna switch_to_mdm\n");
3810
3811 req = kzalloc(sizeof(*req), GFP_KERNEL);
3812 if (!req)
3813 return -ENOMEM;
3814
3815 resp = kzalloc(sizeof(*resp), GFP_KERNEL);
3816 if (!resp) {
3817 kfree(req);
3818 return -ENOMEM;
3819 }
3820
3821 req->antenna = plat_priv->antenna;
3822
3823 ret = qmi_txn_init(&plat_priv->coex_qmi, &txn,
3824 coex_antenna_switch_to_mdm_resp_msg_v01_ei, resp);
3825 if (ret < 0) {
3826 cnss_pr_err("Fail to init txn for coex antenna switch_to_mdm resp %d\n",
3827 ret);
3828 goto out;
3829 }
3830
3831 ret = qmi_send_request
3832 (&plat_priv->coex_qmi, NULL, &txn,
3833 QMI_COEX_SWITCH_ANTENNA_TO_MDM_REQ_V01,
3834 COEX_ANTENNA_SWITCH_TO_MDM_REQ_MSG_V01_MAX_MSG_LEN,
3835 coex_antenna_switch_to_mdm_req_msg_v01_ei, req);
3836 if (ret < 0) {
3837 qmi_txn_cancel(&txn);
3838 cnss_pr_err("Fail to send coex antenna switch_to_mdm req %d\n",
3839 ret);
3840 goto out;
3841 }
3842
3843 ret = qmi_txn_wait(&txn, COEX_TIMEOUT);
3844 if (ret < 0) {
3845 cnss_pr_err("Coex antenna switch_to_mdm resp wait failed with ret %d\n",
3846 ret);
3847 goto out;
3848 } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
3849 cnss_pr_err("Coex antenna switch_to_mdm request rejected, result:%d error:%d\n",
3850 resp->resp.result, resp->resp.error);
3851 ret = -resp->resp.result;
3852 goto out;
3853 }
3854
3855 kfree(resp);
3856 kfree(req);
3857 return 0;
3858
3859 out:
3860 kfree(resp);
3861 kfree(req);
3862 return ret;
3863 }
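
/*
 * Handover sketch (illustrative): both coex helpers send plat_priv->antenna
 * and wait up to COEX_TIMEOUT, so a typical sequence is to request the
 * grant for WLAN, use plat_priv->grant while WLAN owns the antenna, and
 * return it to the modem afterwards. The gating below is hypothetical glue,
 * not code from this driver.
 *
 *	if (test_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state) &&
 *	    !coex_antenna_switch_to_wlan_send_sync_msg(plat_priv))
 *		cnss_pr_dbg("WLAN antenna grant: 0x%llx\n", plat_priv->grant);
 *	...
 *	coex_antenna_switch_to_mdm_send_sync_msg(plat_priv);
 */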
3864
3865 int cnss_send_subsys_restart_level_msg(struct cnss_plat_data *plat_priv)
3866 {
3867 int ret;
3868 struct wlfw_subsys_restart_level_req_msg_v01 req;
3869 struct wlfw_subsys_restart_level_resp_msg_v01 resp;
3870 u8 pcss_enabled;
3871
3872 if (!plat_priv)
3873 return -ENODEV;
3874
3875 if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
3876 cnss_pr_dbg("Can't send pcss cmd before fw ready\n");
3877 return 0;
3878 }
3879
3880 pcss_enabled = plat_priv->recovery_pcss_enabled;
3881 cnss_pr_dbg("Sending pcss recovery status: %d\n", pcss_enabled);
3882
3883 req.restart_level_type_valid = 1;
3884 req.restart_level_type = pcss_enabled;
3885
3886 ret = qmi_send_wait(&plat_priv->qmi_wlfw, &req, &resp,
3887 wlfw_subsys_restart_level_req_msg_v01_ei,
3888 wlfw_subsys_restart_level_resp_msg_v01_ei,
3889 QMI_WLFW_SUBSYS_RESTART_LEVEL_REQ_V01,
3890 WLFW_SUBSYS_RESTART_LEVEL_REQ_MSG_V01_MAX_MSG_LEN,
3891 QMI_WLFW_TIMEOUT_JF);
3892
3893 if (ret < 0)
3894 cnss_pr_err("pcss recovery setting failed with ret %d\n", ret);
3895 return ret;
3896 }
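
/*
 * Control-path sketch (illustrative): this helper just forwards
 * plat_priv->recovery_pcss_enabled, so a hypothetical control interface
 * (e.g. a debugfs write handler) would update the flag and then push it to
 * firmware once CNSS_FW_READY is set:
 *
 *	plat_priv->recovery_pcss_enabled = enable;
 *	ret = cnss_send_subsys_restart_level_msg(plat_priv);
 */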
3897
3898 static int coex_new_server(struct qmi_handle *qmi,
3899 struct qmi_service *service)
3900 {
3901 struct cnss_plat_data *plat_priv =
3902 container_of(qmi, struct cnss_plat_data, coex_qmi);
3903 struct sockaddr_qrtr sq = { 0 };
3904 int ret = 0;
3905
3906 cnss_pr_dbg("COEX server arrive: node %u port %u\n",
3907 service->node, service->port);
3908
3909 sq.sq_family = AF_QIPCRTR;
3910 sq.sq_node = service->node;
3911 sq.sq_port = service->port;
3912 ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
3913 if (ret < 0) {
3914 cnss_pr_err("Fail to connect to remote service port\n");
3915 return ret;
3916 }
3917
3918 set_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
3919 cnss_pr_dbg("COEX Server Connected: 0x%lx\n",
3920 plat_priv->driver_state);
3921 return 0;
3922 }
3923
3924 static void coex_del_server(struct qmi_handle *qmi,
3925 struct qmi_service *service)
3926 {
3927 struct cnss_plat_data *plat_priv =
3928 container_of(qmi, struct cnss_plat_data, coex_qmi);
3929
3930 cnss_pr_dbg("COEX server exit\n");
3931
3932 clear_bit(CNSS_COEX_CONNECTED, &plat_priv->driver_state);
3933 }
3934
3935 static struct qmi_ops coex_qmi_ops = {
3936 .new_server = coex_new_server,
3937 .del_server = coex_del_server,
3938 };
3939
3940 int cnss_register_coex_service(struct cnss_plat_data *plat_priv)
3941 {
	int ret;
3942
3943 ret = qmi_handle_init(&plat_priv->coex_qmi,
3944 COEX_SERVICE_MAX_MSG_LEN,
3945 &coex_qmi_ops, NULL);
3946 if (ret < 0)
3947 return ret;
3948
3949 ret = qmi_add_lookup(&plat_priv->coex_qmi, COEX_SERVICE_ID_V01,
3950 COEX_SERVICE_VERS_V01, 0);
3951 return ret;
3952 }
3953
3954 void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv)
3955 {
3956 qmi_handle_release(&plat_priv->coex_qmi);
3957 }
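
/*
 * Lifecycle sketch (illustrative): cnss_register_coex_service() only sets
 * up the handle and lookup; the connection is completed asynchronously by
 * coex_new_server() when the COEX service appears, so callers of the
 * antenna-switch helpers should gate on CNSS_COEX_CONNECTED. A hypothetical
 * teardown path simply releases the handle:
 *
 *	if (cnss_register_coex_service(plat_priv) < 0)
 *		cnss_pr_err("Failed to register COEX service\n");
 *	...
 *	cnss_unregister_coex_service(plat_priv);
 */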
3958
3959 /* IMS Service */
3960 int ims_subscribe_for_indication_send_async(struct cnss_plat_data *plat_priv)
3961 {
3962 int ret;
3963 struct ims_private_service_subscribe_for_indications_req_msg_v01 *req;
3964 struct qmi_txn *txn;
3965
3966 if (!plat_priv)
3967 return -ENODEV;
3968
3969 cnss_pr_dbg("Sending ASYNC ims subscribe for indication\n");
3970
3971 req = kzalloc(sizeof(*req), GFP_KERNEL);
3972 if (!req)
3973 return -ENOMEM;
3974
3975 req->wfc_call_status_valid = 1;
3976 req->wfc_call_status = 1;
3977
3978 txn = &plat_priv->txn;
3979 ret = qmi_txn_init(&plat_priv->ims_qmi, txn, NULL, NULL);
3980 if (ret < 0) {
3981 cnss_pr_err("Fail to init txn for ims subscribe for indication resp %d\n",
3982 ret);
3983 goto out;
3984 }
3985
3986 ret = qmi_send_request
3987 (&plat_priv->ims_qmi, NULL, txn,
3988 QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
3989 IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_MSG_V01_MAX_MSG_LEN,
3990 ims_private_service_subscribe_ind_req_msg_v01_ei, req);
3991 if (ret < 0) {
3992 qmi_txn_cancel(txn);
3993 cnss_pr_err("Fail to send ims subscribe for indication req %d\n",
3994 ret);
3995 goto out;
3996 }
3997
3998 kfree(req);
3999 return 0;
4000
4001 out:
4002 kfree(req);
4003 return ret;
4004 }
4005
4006 static void ims_subscribe_for_indication_resp_cb(struct qmi_handle *qmi,
4007 struct sockaddr_qrtr *sq,
4008 struct qmi_txn *txn,
4009 const void *data)
4010 {
4011 const
4012 struct ims_private_service_subscribe_for_indications_rsp_msg_v01 *resp =
4013 data;
4014
4015 cnss_pr_dbg("Received IMS subscribe indication response\n");
4016
4017 if (!txn) {
4018 cnss_pr_err("spurious response\n");
4019 return;
4020 }
4021
4022 if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
4023 cnss_pr_err("IMS subscribe for indication request rejected, result:%d error:%d\n",
4024 resp->resp.result, resp->resp.error);
4025 txn->result = -resp->resp.result;
4026 }
4027 }
4028
4029 int cnss_process_wfc_call_ind_event(struct cnss_plat_data *plat_priv,
4030 void *data)
4031 {
4032 int ret;
4033 struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;
4034
4035 ret = cnss_wlfw_wfc_call_status_send_sync(plat_priv, ind_msg);
4036 kfree(data);
4037 return ret;
4038 }
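
/*
 * Dispatch sketch (illustrative): the WFC indication callback below posts
 * CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND with a kmemdup()'d copy of the
 * indication, and the driver event worker (implemented outside this file)
 * is assumed to route that event here; this function then frees the copy.
 * The switch fragment is a sketch, not the actual worker code.
 *
 *	case CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND:
 *		ret = cnss_process_wfc_call_ind_event(plat_priv, event->data);
 *		break;
 */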
4039
4040 static void
4041 cnss_ims_process_wfc_call_ind_cb(struct qmi_handle *ims_qmi,
4042 struct sockaddr_qrtr *sq,
4043 struct qmi_txn *txn, const void *data)
4044 {
4045 struct cnss_plat_data *plat_priv =
4046 container_of(ims_qmi, struct cnss_plat_data, ims_qmi);
4047 const
4048 struct ims_private_service_wfc_call_status_ind_msg_v01 *ind_msg = data;
4049 struct ims_private_service_wfc_call_status_ind_msg_v01 *event_data;
4050
4051 if (!txn) {
4052 cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Spurious indication\n");
4053 return;
4054 }
4055
4056 if (!ind_msg) {
4057 cnss_pr_err("IMS->CNSS: WFC_CALL_IND: Invalid indication\n");
4058 return;
4059 }
4060 cnss_pr_dbg("IMS->CNSS: WFC_CALL_IND: %x, %x %x, %x %x, %x %llx, %x %x, %x %x\n",
4061 ind_msg->wfc_call_active, ind_msg->all_wfc_calls_held_valid,
4062 ind_msg->all_wfc_calls_held,
4063 ind_msg->is_wfc_emergency_valid, ind_msg->is_wfc_emergency,
4064 ind_msg->twt_ims_start_valid, ind_msg->twt_ims_start,
4065 ind_msg->twt_ims_int_valid, ind_msg->twt_ims_int,
4066 ind_msg->media_quality_valid, ind_msg->media_quality);
4067
4068 event_data = kmemdup(ind_msg, sizeof(*event_data), GFP_KERNEL);
4069 if (!event_data)
4070 return;
4071 cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_IMS_WFC_CALL_IND,
4072 0, event_data);
4073 }
4074
4075 static struct qmi_msg_handler qmi_ims_msg_handlers[] = {
4076 {
4077 .type = QMI_RESPONSE,
4078 .msg_id =
4079 QMI_IMS_PRIVATE_SERVICE_SUBSCRIBE_FOR_INDICATIONS_REQ_V01,
4080 .ei =
4081 ims_private_service_subscribe_ind_rsp_msg_v01_ei,
4082 .decoded_size = sizeof(struct
4083 ims_private_service_subscribe_for_indications_rsp_msg_v01),
4084 .fn = ims_subscribe_for_indication_resp_cb
4085 },
4086 {
4087 .type = QMI_INDICATION,
4088 .msg_id = QMI_IMS_PRIVATE_SERVICE_WFC_CALL_STATUS_IND_V01,
4089 .ei = ims_private_service_wfc_call_status_ind_msg_v01_ei,
4090 .decoded_size =
4091 sizeof(struct ims_private_service_wfc_call_status_ind_msg_v01),
4092 .fn = cnss_ims_process_wfc_call_ind_cb
4093 },
4094 {}
4095 };
4096
4097 static int ims_new_server(struct qmi_handle *qmi,
4098 struct qmi_service *service)
4099 {
4100 struct cnss_plat_data *plat_priv =
4101 container_of(qmi, struct cnss_plat_data, ims_qmi);
4102 struct sockaddr_qrtr sq = { 0 };
4103 int ret = 0;
4104
4105 cnss_pr_dbg("IMS server arrive: node %u port %u\n",
4106 service->node, service->port);
4107
4108 sq.sq_family = AF_QIPCRTR;
4109 sq.sq_node = service->node;
4110 sq.sq_port = service->port;
4111 ret = kernel_connect(qmi->sock, (struct sockaddr *)&sq, sizeof(sq), 0);
4112 if (ret < 0) {
4113 cnss_pr_err("Fail to connect to remote service port\n");
4114 return ret;
4115 }
4116
4117 set_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
4118 cnss_pr_dbg("IMS Server Connected: 0x%lx\n",
4119 plat_priv->driver_state);
4120
4121 ret = ims_subscribe_for_indication_send_async(plat_priv);
4122 return ret;
4123 }
4124
4125 static void ims_del_server(struct qmi_handle *qmi,
4126 struct qmi_service *service)
4127 {
4128 struct cnss_plat_data *plat_priv =
4129 container_of(qmi, struct cnss_plat_data, ims_qmi);
4130
4131 cnss_pr_dbg("IMS server exit\n");
4132
4133 clear_bit(CNSS_IMS_CONNECTED, &plat_priv->driver_state);
4134 }
4135
4136 static struct qmi_ops ims_qmi_ops = {
4137 .new_server = ims_new_server,
4138 .del_server = ims_del_server,
4139 };
4140
4141 int cnss_register_ims_service(struct cnss_plat_data *plat_priv)
4142 {
	int ret;
4143
4144 ret = qmi_handle_init(&plat_priv->ims_qmi,
4145 IMSPRIVATE_SERVICE_MAX_MSG_LEN,
4146 &ims_qmi_ops, qmi_ims_msg_handlers);
4147 if (ret < 0)
4148 return ret;
4149
4150 ret = qmi_add_lookup(&plat_priv->ims_qmi, IMSPRIVATE_SERVICE_ID_V01,
4151 IMSPRIVATE_SERVICE_VERS_V01, 0);
4152 return ret;
4153 }
4154
4155 void cnss_unregister_ims_service(struct cnss_plat_data *plat_priv)
4156 {
4157 qmi_handle_release(&plat_priv->ims_qmi);
4158 }
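
/*
 * Lifecycle sketch (illustrative): after cnss_register_ims_service()
 * succeeds, ims_new_server() connects to the IMS private service and sends
 * the async subscribe request, and WFC call indications are then delivered
 * through qmi_ims_msg_handlers with no further action from the caller. A
 * hypothetical teardown mirrors the registration:
 *
 *	ret = cnss_register_ims_service(plat_priv);
 *	if (ret < 0)
 *		cnss_pr_err("Failed to register IMS service, err: %d\n", ret);
 *	...
 *	cnss_unregister_ims_service(plat_priv);
 */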
4159