// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/version.h>
#include <linux/sched.h>

#include "main.h"
#include "bus.h"
#include "debug.h"
#include "pci.h"
#include "pci_platform.h"
#include "reg.h"

#define PCI_LINK_UP 1
#define PCI_LINK_DOWN 0

#define SAVE_PCI_CONFIG_SPACE 1
#define RESTORE_PCI_CONFIG_SPACE 0

#define PCI_BAR_NUM 0
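/* A read that returns all 1s typically means the PCIe link is down or
 * the device has fallen off the bus, so U32_MAX is treated as invalid.
 */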
#define PCI_INVALID_READ(val) ((val) == U32_MAX)

#define PCI_DMA_MASK_32_BIT DMA_BIT_MASK(32)
#define PCI_DMA_MASK_36_BIT DMA_BIT_MASK(36)
#define PCI_DMA_MASK_64_BIT DMA_BIT_MASK(64)

#define MHI_NODE_NAME "qcom,mhi"
#define MHI_MSI_NAME "MHI"

#define QCA6390_PATH_PREFIX "qca6390/"
#define QCA6490_PATH_PREFIX "qca6490/"
#define QCN7605_PATH_PREFIX "qcn7605/"
#define KIWI_PATH_PREFIX "kiwi/"
#define MANGO_PATH_PREFIX "mango/"
#define PEACH_PATH_PREFIX "peach/"
#define DEFAULT_PHY_M3_FILE_NAME "m3.bin"
#define DEFAULT_AUX_FILE_NAME "aux_ucode.elf"
#define DEFAULT_PHY_UCODE_FILE_NAME "phy_ucode.elf"
#define TME_PATCH_FILE_NAME_1_0 "tmel_peach_10.elf"
#define TME_PATCH_FILE_NAME_2_0 "tmel_peach_20.elf"
#define PHY_UCODE_V2_FILE_NAME "phy_ucode20.elf"
#define DEFAULT_FW_FILE_NAME "amss.bin"
#define FW_V2_FILE_NAME "amss20.bin"
#define DEVICE_MAJOR_VERSION_MASK 0xF

#define WAKE_MSI_NAME "WAKE"

#define DEV_RDDM_TIMEOUT 5000
#define WAKE_EVENT_TIMEOUT 5000

#ifdef CONFIG_CNSS_EMULATION
#define EMULATION_HW 1
#else
#define EMULATION_HW 0
#endif

#define RAMDUMP_SIZE_DEFAULT 0x420000
#define CNSS_256KB_SIZE 0x40000
#define DEVICE_RDDM_COOKIE 0xCAFECACE

static bool cnss_driver_registered;

static DEFINE_SPINLOCK(pci_link_down_lock);
static DEFINE_SPINLOCK(pci_reg_window_lock);
static DEFINE_SPINLOCK(time_sync_lock);

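/* These two macros expand against a local cnss_plat_data pointer named
 * plat_priv, so they are only usable in scopes that declare one.
 */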
#define MHI_TIMEOUT_OVERWRITE_MS (plat_priv->ctrl_params.mhi_timeout)
#define MHI_M2_TIMEOUT_MS (plat_priv->ctrl_params.mhi_m2_timeout)

#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US 1000
#define WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US 2000

#define RDDM_LINK_RECOVERY_RETRY 20
#define RDDM_LINK_RECOVERY_RETRY_DELAY_MS 20

#define FORCE_WAKE_DELAY_MIN_US 4000
#define FORCE_WAKE_DELAY_MAX_US 6000
#define FORCE_WAKE_DELAY_TIMEOUT_US 60000

#define REG_RETRY_MAX_TIMES 3

#define MHI_SUSPEND_RETRY_MAX_TIMES 3
#define MHI_SUSPEND_RETRY_DELAY_US 5000

#define BOOT_DEBUG_TIMEOUT_MS 7000

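/* Hang data lives at the tail of a per-target region; judging by these
 * offsets the region is 3 MB on HST, 2 MB on HSP and 0x7d000 on Genoa
 * (GNO), though the exact region sizes are an inference from the math.
 */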
#define HANG_DATA_LENGTH 384
#define HST_HANG_DATA_OFFSET ((3 * 1024 * 1024) - HANG_DATA_LENGTH)
#define HSP_HANG_DATA_OFFSET ((2 * 1024 * 1024) - HANG_DATA_LENGTH)
#define GNO_HANG_DATA_OFFSET (0x7d000 - HANG_DATA_LENGTH)

#define AFC_SLOT_SIZE 0x1000
#define AFC_MAX_SLOT 2
#define AFC_MEM_SIZE (AFC_SLOT_SIZE * AFC_MAX_SLOT)
#define AFC_AUTH_STATUS_OFFSET 1
#define AFC_AUTH_SUCCESS 1
#define AFC_AUTH_ERROR 0

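/* Static MHI channel tables. Each entry binds one direction of a
 * logical channel pair (LOOPBACK, DIAG, IPCR, ...) to an event ring;
 * ee_mask 0x4 corresponds to the mission-mode (AMSS) execution
 * environment in the MHI core.
 */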
static const struct mhi_channel_config cnss_mhi_channels[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 4,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 5,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 20,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 21,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
	/* Keep all MHI satellite channel configs at the end of the array */
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num = 50,
		.name = "ADSP_0",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 51,
		.name = "ADSP_1",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 70,
		.name = "ADSP_2",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 71,
		.name = "ADSP_3",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
#endif
};

static const struct mhi_channel_config cnss_mhi_channels_no_diag[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 20,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 21,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
	/* Keep all MHI satellite channel configs at the end of the array */
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num = 50,
		.name = "ADSP_0",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 51,
		.name = "ADSP_1",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 70,
		.name = "ADSP_2",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 71,
		.name = "ADSP_3",
		.num_elements = 64,
		.event_ring = 3,
		.dir = DMA_BIDIRECTIONAL,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = true,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
#endif
};

static const struct mhi_channel_config cnss_mhi_channels_genoa[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 4,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 5,
		.name = "DIAG",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 16,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 17,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
};

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 12, 0))
static struct mhi_event_config cnss_mhi_events[] = {
#else
static const struct mhi_event_config cnss_mhi_events[] = {
#endif
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
		.priority = 0,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.priority = 1,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_BW_SCALE,
		.priority = 2,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
#endif
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	{
		.num_elements = 256,
		.irq_moderation_ms = 0,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_DATA,
		.priority = 1,
		.hardware_event = false,
		.client_managed = true,
		.offload_channel = true,
	},
#endif
};

#if IS_ENABLED(CONFIG_MHI_SATELLITE)
#define CNSS_MHI_SATELLITE_CH_CFG_COUNT 4
#define CNSS_MHI_SATELLITE_EVT_COUNT 1
#else
#define CNSS_MHI_SATELLITE_CH_CFG_COUNT 0
#define CNSS_MHI_SATELLITE_EVT_COUNT 0
#endif
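/* The satellite (ADSP) channels and their event ring sit at the tail of
 * the arrays above, so the non-satellite controller configs below can
 * drop them by subtracting these counts from ARRAY_SIZE().
 */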

static const struct mhi_controller_config cnss_mhi_config_no_diag = {
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	.max_channels = 72,
#else
	.max_channels = 32,
#endif
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels_no_diag),
	.ch_cfg = cnss_mhi_channels_no_diag,
	.num_events = ARRAY_SIZE(cnss_mhi_events),
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};

static const struct mhi_controller_config cnss_mhi_config_default = {
#if IS_ENABLED(CONFIG_MHI_SATELLITE)
	.max_channels = 72,
#else
	.max_channels = 32,
#endif
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels),
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events),
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};

static const struct mhi_controller_config cnss_mhi_config_genoa = {
	.max_channels = 32,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels_genoa),
	.ch_cfg = cnss_mhi_channels_genoa,
	.num_events = ARRAY_SIZE(cnss_mhi_events) -
		CNSS_MHI_SATELLITE_EVT_COUNT,
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
	.bhie_offset = 0x0324,
#endif
};

static const struct mhi_controller_config cnss_mhi_config_no_satellite = {
	.max_channels = 32,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0x8000,
	.num_channels = ARRAY_SIZE(cnss_mhi_channels) -
		CNSS_MHI_SATELLITE_CH_CFG_COUNT,
	.ch_cfg = cnss_mhi_channels,
	.num_events = ARRAY_SIZE(cnss_mhi_events) -
		CNSS_MHI_SATELLITE_EVT_COUNT,
	.event_cfg = cnss_mhi_events,
	.m2_no_db = true,
};

static struct cnss_pci_reg ce_src[] = {
	{ "SRC_RING_BASE_LSB", CE_SRC_RING_BASE_LSB_OFFSET },
	{ "SRC_RING_BASE_MSB", CE_SRC_RING_BASE_MSB_OFFSET },
	{ "SRC_RING_ID", CE_SRC_RING_ID_OFFSET },
	{ "SRC_RING_MISC", CE_SRC_RING_MISC_OFFSET },
	{ "SRC_CTRL", CE_SRC_CTRL_OFFSET },
	{ "SRC_R0_CE_CH_SRC_IS", CE_SRC_R0_CE_CH_SRC_IS_OFFSET },
	{ "SRC_RING_HP", CE_SRC_RING_HP_OFFSET },
	{ "SRC_RING_TP", CE_SRC_RING_TP_OFFSET },
	{ NULL },
};

static struct cnss_pci_reg ce_dst[] = {
	{ "DEST_RING_BASE_LSB", CE_DEST_RING_BASE_LSB_OFFSET },
	{ "DEST_RING_BASE_MSB", CE_DEST_RING_BASE_MSB_OFFSET },
	{ "DEST_RING_ID", CE_DEST_RING_ID_OFFSET },
	{ "DEST_RING_MISC", CE_DEST_RING_MISC_OFFSET },
	{ "DEST_CTRL", CE_DEST_CTRL_OFFSET },
	{ "CE_CH_DST_IS", CE_CH_DST_IS_OFFSET },
	{ "CE_CH_DEST_CTRL2", CE_CH_DEST_CTRL2_OFFSET },
	{ "DEST_RING_HP", CE_DEST_RING_HP_OFFSET },
	{ "DEST_RING_TP", CE_DEST_RING_TP_OFFSET },
	{ "STATUS_RING_BASE_LSB", CE_STATUS_RING_BASE_LSB_OFFSET },
	{ "STATUS_RING_BASE_MSB", CE_STATUS_RING_BASE_MSB_OFFSET },
	{ "STATUS_RING_ID", CE_STATUS_RING_ID_OFFSET },
	{ "STATUS_RING_MISC", CE_STATUS_RING_MISC_OFFSET },
	{ "STATUS_RING_HP", CE_STATUS_RING_HP_OFFSET },
	{ "STATUS_RING_TP", CE_STATUS_RING_TP_OFFSET },
	{ NULL },
};

static struct cnss_pci_reg ce_cmn[] = {
	{ "GXI_ERR_INTS", CE_COMMON_GXI_ERR_INTS },
	{ "GXI_ERR_STATS", CE_COMMON_GXI_ERR_STATS },
	{ "GXI_WDOG_STATUS", CE_COMMON_GXI_WDOG_STATUS },
	{ "TARGET_IE_0", CE_COMMON_TARGET_IE_0 },
	{ "TARGET_IE_1", CE_COMMON_TARGET_IE_1 },
	{ NULL },
};

static struct cnss_pci_reg qdss_csr[] = {
	{ "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
	{ "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
	{ "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
	{ "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
	{ NULL },
};

static struct cnss_pci_reg pci_scratch[] = {
	{ "PCIE_SCRATCH_0", PCIE_SCRATCH_0_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_1", PCIE_SCRATCH_1_SOC_PCIE_REG },
	{ "PCIE_SCRATCH_2", PCIE_SCRATCH_2_SOC_PCIE_REG },
	{ NULL },
};

static struct cnss_pci_reg pci_bhi_debug[] = {
	{ "PCIE_BHIE_DEBUG_0", PCIE_PCIE_BHIE_DEBUG_0 },
	{ "PCIE_BHIE_DEBUG_1", PCIE_PCIE_BHIE_DEBUG_1 },
	{ "PCIE_BHIE_DEBUG_2", PCIE_PCIE_BHIE_DEBUG_2 },
	{ "PCIE_BHIE_DEBUG_3", PCIE_PCIE_BHIE_DEBUG_3 },
	{ "PCIE_BHIE_DEBUG_4", PCIE_PCIE_BHIE_DEBUG_4 },
	{ "PCIE_BHIE_DEBUG_5", PCIE_PCIE_BHIE_DEBUG_5 },
	{ "PCIE_BHIE_DEBUG_6", PCIE_PCIE_BHIE_DEBUG_6 },
	{ "PCIE_BHIE_DEBUG_7", PCIE_PCIE_BHIE_DEBUG_7 },
	{ "PCIE_BHIE_DEBUG_8", PCIE_PCIE_BHIE_DEBUG_8 },
	{ "PCIE_BHIE_DEBUG_9", PCIE_PCIE_BHIE_DEBUG_9 },
	{ "PCIE_BHIE_DEBUG_10", PCIE_PCIE_BHIE_DEBUG_10 },
	{ NULL },
};

/* First field of the structure is the device bit mask. Use
 * enum cnss_pci_reg_mask as reference for the value.
 */
static struct cnss_misc_reg wcss_reg_access_seq[] = {
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
	{1, 1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
	{1, 0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
	{1, 0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
	{1, 1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
	{1, 0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
	{1, 0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
	{1, 0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
	{1, 0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
};

static struct cnss_misc_reg pcie_reg_access_seq[] = {
	{1, 0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
	{1, 0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
	{1, 0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
	{1, 0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
	{1, 0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
	{1, 0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
	{1, 0, QCA6390_PCIE_PCIE_BHI_EXECENV_REG, 0},
};

static struct cnss_misc_reg wlaon_reg_access_seq[] = {
	{3, 0, WLAON_SOC_POWER_CTRL, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
	{3, 0, WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
	{3, 0, WLAON_SW_COLD_RESET, 0},
	{3, 0, WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING, 0},
	{3, 0, WLAON_GDSC_DELAY_SETTING2, 0},
	{3, 0, WLAON_WL_PWR_STATUS_REG, 0},
	{3, 0, WLAON_WL_AON_DBG_CFG_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP0_REG, 0},
	{2, 0, WLAON_WL_AON_DBG_ENABLE_GRP1_REG, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL0, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL2, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL3, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL4, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL5_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL6_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL7, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL8_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL9_1, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL10, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL11, 0},
	{2, 0, WLAON_WL_AON_APM_CFG_CTRL12, 0},
	{2, 0, WLAON_WL_AON_APM_OVERRIDE_REG, 0},
	{2, 0, WLAON_WL_AON_CXPC_REG, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS0, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS1, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS2, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS3, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS4, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS5, 0},
	{2, 0, WLAON_WL_AON_APM_STATUS6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL1, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL6, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL7, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL3, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL4, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL5, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL8, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL2, 0},
	{3, 0, WLAON_GLOBAL_COUNTER_CTRL9, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL1, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL2, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL3, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL4, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL5, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL6, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL7, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL8, 0},
	{3, 0, WLAON_RTC_CLK_CAL_CTRL9, 0},
	{3, 0, WLAON_WCSSAON_CONFIG_REG, 0},
	{3, 0, WLAON_WLAN_OEM_DEBUG_REG, 0},
	{3, 0, WLAON_WLAN_RAM_DUMP_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_REG, 0},
	{3, 0, WLAON_QDSS_WCSS_ACK, 0},
	{3, 0, WLAON_WL_CLK_CNTL_KDF_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
	{3, 0, WLAON_QFPROM_PWR_CTRL_REG, 0},
	{3, 0, WLAON_DLY_CONFIG, 0},
	{3, 0, WLAON_WLAON_Q6_IRQ_REG, 0},
	{3, 0, WLAON_PCIE_INTF_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
	{3, 0, WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
	{3, 0, WLAON_Q6_COOKIE_BIT, 0},
	{3, 0, WLAON_WARM_SW_ENTRY, 0},
	{3, 0, WLAON_RESET_DBG_SW_ENTRY, 0},
	{3, 0, WLAON_WL_PMUNOC_CFG_REG, 0},
	{3, 0, WLAON_RESET_CAUSE_CFG_REG, 0},
	{3, 0, WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
	{3, 0, WLAON_DEBUG, 0},
	{3, 0, WLAON_SOC_PARAMETERS, 0},
	{3, 0, WLAON_WLPM_SIGNAL, 0},
	{3, 0, WLAON_SOC_RESET_CAUSE_REG, 0},
	{3, 0, WLAON_WAKEUP_PCIE_SOC_REG, 0},
	{3, 0, WLAON_PBL_STACK_CANARY, 0},
	{3, 0, WLAON_MEM_TOT_NUM_GRP_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
	{3, 0, WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
	{3, 0, WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
	{3, 0, WLAON_MEM_CNT_SEL_REG, 0},
	{3, 0, WLAON_MEM_NO_EXTBHS_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_REG, 0},
	{3, 0, WLAON_MEM_DEBUG_BUS_REG, 0},
	{3, 0, WLAON_MEM_REDUN_CFG_REG, 0},
	{3, 0, WLAON_WL_AON_SPARE2, 0},
	{3, 0, WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
	{3, 0, WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
	{3, 0, WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
	{3, 0, WLAON_WLPM_CHICKEN_BITS, 0},
	{3, 0, WLAON_PCIE_PHY_PWR_REG, 0},
	{3, 0, WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
	{3, 0, WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
	{3, 0, WLAON_POWERCTRL_PMU_REG, 0},
	{3, 0, WLAON_POWERCTRL_MEM_REG, 0},
	{3, 0, WLAON_PCIE_PWR_CTRL_REG, 0},
	{3, 0, WLAON_SOC_PWR_PROFILE_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
	{3, 0, WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
	{3, 0, WLAON_MEM_SVS_CFG_REG, 0},
	{3, 0, WLAON_CMN_AON_MISC_REG, 0},
	{3, 0, WLAON_INTR_STATUS, 0},
	{2, 0, WLAON_INTR_ENABLE, 0},
	{2, 0, WLAON_NOC_DBG_BUS_SEL_REG, 0},
	{2, 0, WLAON_NOC_DBG_BUS_REG, 0},
	{2, 0, WLAON_WL_CTRL_MISC_REG, 0},
	{2, 0, WLAON_DBG_STATUS0, 0},
	{2, 0, WLAON_DBG_STATUS1, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_L, 0},
	{2, 0, WLAON_TIMERSYNC_OFFSET_H, 0},
	{2, 0, WLAON_PMU_LDO_SETTLE_REG, 0},
};

static struct cnss_misc_reg syspm_reg_access_seq[] = {
	{1, 0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
	{1, 0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
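	/* WCSSAON_SR_STATUS is read repeatedly on purpose, presumably to
	 * sample save/restore state transitions over time.
	 */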
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
	{1, 0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
};

static struct cnss_print_optimize print_optimize;

#define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
#define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
#define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
#define SYSPM_REG_SIZE ARRAY_SIZE(syspm_reg_access_seq)

static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv);
static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev);
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev);
static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
				       enum cnss_bus_event_type type,
				       void *data);

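/* Thin wrappers around the optional MHI bus-misc APIs. When
 * CONFIG_MHI_BUS_MISC is disabled they become no-ops or return
 * -EOPNOTSUPP, so callers need no ifdefs of their own.
 */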
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
	mhi_debug_reg_dump(pci_priv->mhi_ctrl);
}

static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
{
	mhi_dump_sfr(pci_priv->mhi_ctrl);
}

static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
				      u32 cookie)
{
	return mhi_scan_rddm_cookie(pci_priv->mhi_ctrl, cookie);
}

static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
				    bool notify_clients)
{
	return mhi_pm_fast_suspend(pci_priv->mhi_ctrl, notify_clients);
}

static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
				   bool notify_clients)
{
	return mhi_pm_fast_resume(pci_priv->mhi_ctrl, notify_clients);
}

static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
				       u32 timeout)
{
	return mhi_set_m2_timeout_ms(pci_priv->mhi_ctrl, timeout);
}

static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
					   int timeout_us, bool in_panic)
{
	return mhi_device_get_sync_atomic(pci_priv->mhi_ctrl->mhi_dev,
					  timeout_us, in_panic);
}

#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
{
	return mhi_host_notify_db_disable_trace(pci_priv->mhi_ctrl);
}
#endif

static void
cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
				    int (*cb)(struct mhi_controller *mhi_ctrl,
					      struct mhi_link_info *link_info))
{
	mhi_controller_set_bw_scale_cb(pci_priv->mhi_ctrl, cb);
}

static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
{
	return mhi_force_reset(pci_priv->mhi_ctrl);
}

void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
				  phys_addr_t base)
{
	return mhi_controller_set_base(pci_priv->mhi_ctrl, base);
}
#else
static void cnss_mhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
}

static void cnss_mhi_dump_sfr(struct cnss_pci_data *pci_priv)
{
}

static bool cnss_mhi_scan_rddm_cookie(struct cnss_pci_data *pci_priv,
				      u32 cookie)
{
	return false;
}

static int cnss_mhi_pm_fast_suspend(struct cnss_pci_data *pci_priv,
				    bool notify_clients)
{
	return -EOPNOTSUPP;
}

static int cnss_mhi_pm_fast_resume(struct cnss_pci_data *pci_priv,
				   bool notify_clients)
{
	return -EOPNOTSUPP;
}

static void cnss_mhi_set_m2_timeout_ms(struct cnss_pci_data *pci_priv,
				       u32 timeout)
{
}

static int cnss_mhi_device_get_sync_atomic(struct cnss_pci_data *pci_priv,
					   int timeout_us, bool in_panic)
{
	return -EOPNOTSUPP;
}

#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
static int cnss_mhi_host_notify_db_disable_trace(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}
#endif

static void
cnss_mhi_controller_set_bw_scale_cb(struct cnss_pci_data *pci_priv,
				    int (*cb)(struct mhi_controller *mhi_ctrl,
					      struct mhi_link_info *link_info))
{
}

static int cnss_mhi_force_reset(struct cnss_pci_data *pci_priv)
{
	return -EOPNOTSUPP;
}

void cnss_mhi_controller_set_base(struct cnss_pci_data *pci_priv,
				  phys_addr_t base)
{
}
#endif /* CONFIG_MHI_BUS_MISC */

#ifdef CONFIG_CNSS2_SMMU_DB_SUPPORT
#define CNSS_MHI_WAKE_TIMEOUT 500000

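/* SMMU fault hook: on an IOMMU fault, force the device into M0 and ask
 * firmware to stop trace collection, timestamping entry, doorbell ring
 * and exit so the fault window can be reconstructed later from
 * smmu_fault_timestamp[].
 */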
static void cnss_record_smmu_fault_timestamp(struct cnss_pci_data *pci_priv,
					     enum cnss_smmu_fault_time id)
{
	if (id >= SMMU_CB_MAX)
		return;

	pci_priv->smmu_fault_timestamp[id] = sched_clock();
}

static void cnss_pci_smmu_fault_handler_irq(struct iommu_domain *domain,
					    void *handler_token)
{
	struct cnss_pci_data *pci_priv = handler_token;
	int ret = 0;

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_ENTRY);
	ret = cnss_mhi_device_get_sync_atomic(pci_priv,
					      CNSS_MHI_WAKE_TIMEOUT, true);
	if (ret < 0) {
		cnss_pr_err("Failed to bring mhi in M0 state, ret %d\n", ret);
		return;
	}

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_DOORBELL_RING);
	ret = cnss_mhi_host_notify_db_disable_trace(pci_priv);
	if (ret < 0)
		cnss_pr_err("Fail to notify wlan fw to stop trace collection, ret %d\n", ret);

	cnss_record_smmu_fault_timestamp(pci_priv, SMMU_CB_EXIT);
}

void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
{
	qcom_iommu_set_fault_handler_irq(pci_priv->iommu_domain,
					 cnss_pci_smmu_fault_handler_irq,
					 pci_priv);
}
#else
void cnss_register_iommu_fault_handler_irq(struct cnss_pci_data *pci_priv)
{
}
#endif

int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
{
	u16 device_id;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_dbg("%ps: PCIe link is in suspend state\n",
			    (void *)_RET_IP_);
		return -EACCES;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_err("%ps: PCIe link is down\n", (void *)_RET_IP_);
		return -EIO;
	}

	pci_read_config_word(pci_priv->pci_dev, PCI_DEVICE_ID, &device_id);
	if (device_id != pci_priv->device_id) {
		cnss_fatal_err("%ps: PCI device ID mismatch, link possibly down, current read ID: 0x%x, record ID: 0x%x\n",
			       (void *)_RET_IP_, device_id,
			       pci_priv->device_id);
		return -EIO;
	}

	return 0;
}

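/* BAR offsets at or above MAX_UNWINDOWED_ADDRESS are reached through a
 * remap window: cnss_pci_select_window() programs the window control
 * register (reading it back to confirm the write landed), after which
 * the register is accessed at WINDOW_START plus its window-relative
 * offset.
 */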
static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
	u32 window_enable = WINDOW_ENABLE_BIT | window;
	u32 val;

	if (plat_priv->device_id == QCN7605_DEVICE_ID)
		window_enable = QCN7605_WINDOW_ENABLE_BIT | window;

	if (plat_priv->device_id == PEACH_DEVICE_ID) {
		writel_relaxed(window_enable, pci_priv->bar +
			       PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
	} else {
		writel_relaxed(window_enable, pci_priv->bar +
			       QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
	}

	if (window != pci_priv->remap_window) {
		pci_priv->remap_window = window;
		cnss_pr_dbg("Config PCIe remap window register to 0x%x\n",
			    window_enable);
	}

	/* Read it back to make sure the write has taken effect */
	if (plat_priv->device_id == PEACH_DEVICE_ID) {
		val = readl_relaxed(pci_priv->bar +
				    PEACH_PCIE_REMAP_BAR_CTRL_OFFSET);
	} else {
		val = readl_relaxed(pci_priv->bar +
				    QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET);
	}
	if (val != window_enable) {
		cnss_pr_err("Failed to config window register to 0x%x, current value: 0x%x\n",
			    window_enable, val);
		if (!cnss_pci_check_link_status(pci_priv) &&
		    !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
			CNSS_ASSERT(0);
	}
}

static int cnss_pci_reg_read(struct cnss_pci_data *pci_priv,
			     u32 offset, u32 *val)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (!in_interrupt() && !irqs_disabled()) {
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret)
			return ret;
	}

	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		*val = readl_relaxed(pci_priv->bar + offset);
		return 0;
	}

	/* If in panic, assumption is kernel panic handler will hold all threads
	 * and interrupts. Further pci_reg_window_lock could be held before
	 * panic. So only lock during normal operation.
	 */
	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
		cnss_pci_select_window(pci_priv, offset);
		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
				     (offset & WINDOW_RANGE_MASK));
	} else {
		spin_lock_bh(&pci_reg_window_lock);
		cnss_pci_select_window(pci_priv, offset);
		*val = readl_relaxed(pci_priv->bar + WINDOW_START +
				     (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&pci_reg_window_lock);
	}

	return 0;
}

static int cnss_pci_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
			      u32 val)
{
	int ret;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (!in_interrupt() && !irqs_disabled()) {
		ret = cnss_pci_check_link_status(pci_priv);
		if (ret)
			return ret;
	}

	if (pci_priv->pci_dev->device == QCA6174_DEVICE_ID ||
	    offset < MAX_UNWINDOWED_ADDRESS) {
		writel_relaxed(val, pci_priv->bar + offset);
		return 0;
	}

	/* Same constraint as PCI register read in panic */
	if (test_bit(CNSS_IN_PANIC, &plat_priv->driver_state)) {
		cnss_pci_select_window(pci_priv, offset);
		writel_relaxed(val, pci_priv->bar + WINDOW_START +
			       (offset & WINDOW_RANGE_MASK));
	} else {
		spin_lock_bh(&pci_reg_window_lock);
		cnss_pci_select_window(pci_priv, offset);
		writel_relaxed(val, pci_priv->bar + WINDOW_START +
			       (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&pci_reg_window_lock);
	}

	return 0;
}

static int cnss_pci_force_wake_get(struct cnss_pci_data *pci_priv)
{
	struct device *dev = &pci_priv->pci_dev->dev;
	int ret;

	ret = cnss_pci_force_wake_request_sync(dev,
					       FORCE_WAKE_DELAY_TIMEOUT_US);
	if (ret) {
		if (ret != -EAGAIN)
			cnss_pr_err("Failed to request force wake\n");
		return ret;
	}

	/* If device's M1 state-change event races here, it can be ignored,
	 * as the device is expected to immediately move from M2 to M0
	 * without entering low power state.
	 */
	if (cnss_pci_is_device_awake(dev) != true)
		cnss_pr_warn("MHI not in M0, while reg still accessible\n");

	return 0;
}

static int cnss_pci_force_wake_put(struct cnss_pci_data *pci_priv)
{
	struct device *dev = &pci_priv->pci_dev->dev;
	int ret;

	ret = cnss_pci_force_wake_release(dev);
	if (ret && ret != -EAGAIN)
		cnss_pr_err("Failed to release force wake\n");

	return ret;
}

#if IS_ENABLED(CONFIG_INTERCONNECT)
/**
 * cnss_setup_bus_bandwidth() - Setup interconnect vote for given bandwidth
 * @plat_priv: Platform private data struct
 * @bw: bandwidth
 * @save: toggle flag to save bandwidth to current_bw_vote
 *
 * Setup bandwidth votes for configured interconnect paths
 *
 * Return: 0 for success
 */
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	int ret = 0;
	struct cnss_bus_bw_info *bus_bw_info;

	if (!plat_priv->icc.path_count)
		return -EOPNOTSUPP;

	if (bw >= plat_priv->icc.bus_bw_cfg_count) {
		cnss_pr_err("Invalid bus bandwidth Type: %d", bw);
		return -EINVAL;
	}

	cnss_pr_buf("Bandwidth vote to %d, save %d\n", bw, save);

	list_for_each_entry(bus_bw_info, &plat_priv->icc.list_head, list) {
		ret = icc_set_bw(bus_bw_info->icc_path,
				 bus_bw_info->cfg_table[bw].avg_bw,
				 bus_bw_info->cfg_table[bw].peak_bw);
		if (ret) {
			cnss_pr_err("Could not set BW Cfg: %d, err = %d ICC Path: %s Val: %d %d\n",
				    bw, ret, bus_bw_info->icc_name,
				    bus_bw_info->cfg_table[bw].avg_bw,
				    bus_bw_info->cfg_table[bw].peak_bw);
			break;
		}
	}
	if (ret == 0 && save)
		plat_priv->icc.current_bw_vote = bw;
	return ret;
}

int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);

	if (!plat_priv)
		return -ENODEV;

	if (bandwidth < 0)
		return -EINVAL;

	return cnss_setup_bus_bandwidth(plat_priv, (u32)bandwidth, true);
}
#else
static int cnss_setup_bus_bandwidth(struct cnss_plat_data *plat_priv,
				    u32 bw, bool save)
{
	return 0;
}

int cnss_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	return 0;
}
#endif
EXPORT_SYMBOL(cnss_request_bus_bandwidth);

int cnss_pci_debug_reg_read(struct cnss_pci_data *pci_priv, u32 offset,
			    u32 *val, bool raw_access)
{
	int ret = 0;
	bool do_force_wake_put = true;

	if (raw_access) {
		ret = cnss_pci_reg_read(pci_priv, offset, val);
		goto out;
	}

	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
	if (ret)
		goto out;

	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
	if (ret < 0)
		goto runtime_pm_put;

	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		do_force_wake_put = false;

	ret = cnss_pci_reg_read(pci_priv, offset, val);
	if (ret) {
		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
			    offset, ret);
		goto force_wake_put;
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
out:
	return ret;
}

int cnss_pci_debug_reg_write(struct cnss_pci_data *pci_priv, u32 offset,
			     u32 val, bool raw_access)
{
	int ret = 0;
	bool do_force_wake_put = true;

	if (raw_access) {
		ret = cnss_pci_reg_write(pci_priv, offset, val);
		goto out;
	}

	ret = cnss_pci_is_device_down(&pci_priv->pci_dev->dev);
	if (ret)
		goto out;

	ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
	if (ret < 0)
		goto runtime_pm_put;

	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		do_force_wake_put = false;

	ret = cnss_pci_reg_write(pci_priv, offset, val);
	if (ret) {
		cnss_pr_err("Failed to write 0x%x to register offset 0x%x, err = %d\n",
			    val, offset, ret);
		goto force_wake_put;
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
out:
	return ret;
}

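/* Save or restore PCI config space around link state transitions. On a
 * link-down or recovery path the saved state is not trusted: the save
 * side drops it and the restore side falls back to the recorded default
 * state instead.
 */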
static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	bool link_down_or_recovery;

	if (!plat_priv)
		return -ENODEV;

	link_down_or_recovery = pci_priv->pci_link_down_ind ||
		(test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));

	if (save) {
		if (link_down_or_recovery) {
			pci_priv->saved_state = NULL;
		} else {
			pci_save_state(pci_dev);
			pci_priv->saved_state = pci_store_saved_state(pci_dev);
		}
	} else {
		if (link_down_or_recovery) {
			pci_load_saved_state(pci_dev, pci_priv->default_state);
			pci_restore_state(pci_dev);
		} else if (pci_priv->saved_state) {
			pci_load_and_free_saved_state(pci_dev,
						      &pci_priv->saved_state);
			pci_restore_state(pci_dev);
		}
	}

	return 0;
}

static int cnss_update_supported_link_info(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *root_port;
	struct device_node *root_of_node;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -EINVAL;

	if (pci_priv->device_id != KIWI_DEVICE_ID)
		return ret;

	plat_priv = pci_priv->plat_priv;
	root_port = pcie_find_root_port(pci_priv->pci_dev);

	if (!root_port) {
		cnss_pr_err("PCIe root port is null\n");
		return -EINVAL;
	}

	root_of_node = root_port->dev.of_node;
	if (root_of_node && root_of_node->parent) {
		ret = of_property_read_u32(root_of_node->parent,
					   "qcom,target-link-speed",
					   &plat_priv->supported_link_speed);
		if (!ret)
			cnss_pr_dbg("Supported PCIe Link Speed: %d\n",
				    plat_priv->supported_link_speed);
		else
			plat_priv->supported_link_speed = 0;
	}

	return ret;
}

static int cnss_pci_get_link_status(struct cnss_pci_data *pci_priv)
{
	u16 link_status;
	int ret;

	ret = pcie_capability_read_word(pci_priv->pci_dev, PCI_EXP_LNKSTA,
					&link_status);
	if (ret)
		return ret;

	cnss_pr_dbg("Get PCI link status register: %u\n", link_status);

	pci_priv->def_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
	pci_priv->def_link_width =
		(link_status & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
	pci_priv->cur_link_speed = pci_priv->def_link_speed;

	cnss_pr_dbg("Default PCI link speed is 0x%x, link width is 0x%x\n",
		    pci_priv->def_link_speed, pci_priv->def_link_width);

	return 0;
}

static void cnss_pci_soc_scratch_reg_dump(struct cnss_pci_data *pci_priv)
{
	u32 reg_offset, val;
	int i;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump SOC Scratch registers\n");

	for (i = 0; pci_scratch[i].name; i++) {
		reg_offset = pci_scratch[i].offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
			return;
		cnss_pr_dbg("PCIE_SOC_REG_%s = 0x%x\n",
			    pci_scratch[i].name, val);
	}
}

static void cnss_pci_soc_reset_cause_reg_dump(struct cnss_pci_data *pci_priv)
{
	u32 val;

	switch (pci_priv->device_id) {
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump SOC Reset Cause registers\n");

	if (cnss_pci_reg_read(pci_priv, WLAON_SOC_RESET_CAUSE_SHADOW_REG,
			      &val))
		return;
	cnss_pr_dbg("WLAON_SOC_RESET_CAUSE_SHADOW_REG = 0x%x\n", val);
}

static void cnss_pci_bhi_debug_reg_dump(struct cnss_pci_data *pci_priv)
{
	u32 reg_offset, val;
	int i;

	switch (pci_priv->device_id) {
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Start to dump PCIE BHIE DEBUG registers\n");

	for (i = 0; pci_bhi_debug[i].name; i++) {
		reg_offset = pci_bhi_debug[i].offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
			return;
		cnss_pr_dbg("PCIE__%s = 0x%x\n",
			    pci_bhi_debug[i].name, val);
	}
}

int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_info("PCI link is already suspended\n");
		goto out;
	}

	pci_clear_master(pci_priv->pci_dev);

	ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
	if (ret)
		goto out;

	pci_disable_device(pci_priv->pci_dev);

	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
		if (ret)
			cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
	}

	/* Always do PCIe L2 suspend during power off/PCIe link recovery */
	pci_priv->drv_connected_last = 0;

	ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
	if (ret)
		goto out;

	pci_priv->pci_link_state = PCI_LINK_DOWN;

	return 0;
out:
	return ret;
}

int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	if (pci_priv->pci_link_state == PCI_LINK_UP) {
		cnss_pr_info("PCI link is already resumed\n");
		goto out;
	}

	ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
	if (ret) {
		ret = -EAGAIN;
		cnss_pci_update_link_event(pci_priv,
					   BUS_EVENT_PCI_LINK_RESUME_FAIL,
					   NULL);
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;

	if (pci_priv->pci_dev->device != QCA6174_DEVICE_ID) {
		ret = pci_set_power_state(pci_priv->pci_dev, PCI_D0);
		if (ret) {
			cnss_pr_err("Failed to set D0, err = %d\n", ret);
			goto out;
		}
	}

	ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
	if (ret)
		goto out;

	ret = pci_enable_device(pci_priv->pci_dev);
	if (ret) {
		cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
		goto out;
	}

	pci_set_master(pci_priv->pci_dev);

	if (pci_priv->pci_link_down_ind)
		pci_priv->pci_link_down_ind = false;

	return 0;
out:
	return ret;
}

static void cnss_pci_update_link_event(struct cnss_pci_data *pci_priv,
				       enum cnss_bus_event_type type,
				       void *data)
{
	struct cnss_bus_event bus_event;

	bus_event.etype = type;
	bus_event.event_data = data;
	cnss_pci_call_driver_uevent(pci_priv, CNSS_BUS_EVENT, &bus_event);
}

void cnss_pci_handle_linkdown(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	unsigned long flags;

	if (test_bit(ENABLE_PCI_LINK_DOWN_PANIC,
		     &plat_priv->ctrl_params.quirks))
		panic("cnss: PCI link is down\n");

	spin_lock_irqsave(&pci_link_down_lock, flags);
	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress, ignore\n");
		spin_unlock_irqrestore(&pci_link_down_lock, flags);
		return;
	}
	pci_priv->pci_link_down_ind = true;
	spin_unlock_irqrestore(&pci_link_down_lock, flags);

	if (pci_priv->mhi_ctrl) {
		/* Notify MHI about link down */
		mhi_report_error(pci_priv->mhi_ctrl);
	}

	if (pci_dev->device == QCA6174_DEVICE_ID)
		disable_irq_nosync(pci_dev->irq);

1743 /* Notify bus related event. Now for all supported chips.
1744 * Here PCIe LINK_DOWN notification taken care.
1745 * uevent buffer can be extended later, to cover more bus info.
1746 */
1747 cnss_pci_update_link_event(pci_priv, BUS_EVENT_PCI_LINK_DOWN, NULL);
1748
1749 cnss_fatal_err("PCI link down, schedule recovery\n");
1750 reinit_completion(&pci_priv->wake_event_complete);
1751 cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
1752 }
1753
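/**
 * cnss_pci_link_down - Host driver entry point to report a PCIe link down
 * @dev: PCI device
 *
 * Called by the WLAN host driver when it detects a dead link. Optionally
 * enables self recovery based on device tree, then tries to assert PERST;
 * if that fails, falls back to the normal link-down handling path.
 *
 * Return: 0 on success, negative errno on failure
 */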
int cnss_pci_link_down(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv = NULL;
	int ret;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -EINVAL;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is already in progress\n");
		return -EBUSY;
	}

	if (pci_priv->drv_connected_last &&
	    of_property_read_bool(plat_priv->plat_dev->dev.of_node,
				  "cnss-enable-self-recovery"))
		plat_priv->ctrl_params.quirks |= BIT(LINK_DOWN_SELF_RECOVERY);

	cnss_pr_err("PCI link down is detected by drivers\n");

	ret = cnss_pci_assert_perst(pci_priv);
	if (ret)
		cnss_pci_handle_linkdown(pci_priv);

	return ret;
}
EXPORT_SYMBOL(cnss_pci_link_down);

int cnss_pci_get_reg_dump(struct device *dev, uint8_t *buffer, uint32_t len)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	if (pci_priv->pci_link_state == PCI_LINK_DOWN) {
		cnss_pr_dbg("No PCIe reg dump since PCIe is suspended(D3)\n");
		return -EACCES;
	}

	cnss_pr_dbg("Start to get PCIe reg dump\n");

	return _cnss_pci_get_reg_dump(pci_priv, buffer, len);
}
EXPORT_SYMBOL(cnss_pci_get_reg_dump);

int cnss_pcie_is_device_down(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	return test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) |
		pci_priv->pci_link_down_ind;
}

int cnss_pci_is_device_down(struct device *dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));

	return cnss_pcie_is_device_down(pci_priv);
}
EXPORT_SYMBOL(cnss_pci_is_device_down);

int cnss_pci_shutdown_cleanup(struct cnss_pci_data *pci_priv)
{
	int ret;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	ret = del_timer(&pci_priv->dev_rddm_timer);
	cnss_pr_dbg("%s RDDM timer deleted\n", ret ? "Active" : "Inactive");
	return ret;
}

void cnss_pci_lock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_lock_bh(&pci_reg_window_lock);
}
EXPORT_SYMBOL(cnss_pci_lock_reg_window);

void cnss_pci_unlock_reg_window(struct device *dev, unsigned long *flags)
{
	spin_unlock_bh(&pci_reg_window_lock);
}
EXPORT_SYMBOL(cnss_pci_unlock_reg_window);

int cnss_get_pci_slot(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv = NULL;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -EINVAL;
	}

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	return plat_priv->rc_num;
}
EXPORT_SYMBOL(cnss_get_pci_slot);

/**
 * cnss_pci_dump_bl_sram_mem - Dump WLAN device bootloader debug log
 * @pci_priv: driver PCI bus context pointer
 *
 * Dump primary and secondary bootloader debug log data. For SBL, check the
 * log struct address and size for validity.
 *
 * Return: None
 */
static void cnss_pci_dump_bl_sram_mem(struct cnss_pci_data *pci_priv)
{
	enum mhi_ee_type ee;
	u32 mem_addr, val, pbl_log_max_size, sbl_log_max_size;
	u32 pbl_log_sram_start;
	u32 pbl_stage, sbl_log_start, sbl_log_size;
	u32 pbl_wlan_boot_cfg, pbl_bootstrap_status;
	u32 pbl_bootstrap_status_reg = PBL_BOOTSTRAP_STATUS;
	u32 sbl_log_def_start = SRAM_START;
	u32 sbl_log_def_end = SRAM_END;
	int i;

	cnss_pci_soc_reset_cause_reg_dump(pci_priv);

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		pbl_log_sram_start = QCA6390_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = QCA6390_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = QCA6390_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case QCA6490_DEVICE_ID:
		pbl_log_sram_start = QCA6490_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = QCA6490_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = QCA6490_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case KIWI_DEVICE_ID:
		pbl_bootstrap_status_reg = KIWI_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = KIWI_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = KIWI_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = KIWI_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case MANGO_DEVICE_ID:
		pbl_bootstrap_status_reg = MANGO_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = MANGO_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = MANGO_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = MANGO_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	case PEACH_DEVICE_ID:
		pbl_bootstrap_status_reg = PEACH_PBL_BOOTSTRAP_STATUS;
		pbl_log_sram_start = PEACH_DEBUG_PBL_LOG_SRAM_START;
		pbl_log_max_size = PEACH_DEBUG_PBL_LOG_SRAM_MAX_SIZE;
		sbl_log_max_size = PEACH_DEBUG_SBL_LOG_SRAM_MAX_SIZE;
		break;
	default:
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
	cnss_pci_reg_read(pci_priv, pbl_bootstrap_status_reg,
			  &pbl_bootstrap_status);
	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size: 0x%08x\n",
		    pbl_stage, sbl_log_start, sbl_log_size);
	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x PBL_BOOTSTRAP_STATUS: 0x%08x\n",
		    pbl_wlan_boot_cfg, pbl_bootstrap_status);

	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
		cnss_pr_err("Avoid dumping PBL log data in mission mode\n");
		return;
	}

	cnss_pr_dbg("Dumping PBL log data\n");
	for (i = 0; i < pbl_log_max_size; i += sizeof(val)) {
		mem_addr = pbl_log_sram_start + i;
		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
			break;
		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
	}

	sbl_log_size = (sbl_log_size > sbl_log_max_size ?
			sbl_log_max_size : sbl_log_size);
	if (sbl_log_start < sbl_log_def_start ||
	    sbl_log_start > sbl_log_def_end ||
	    (sbl_log_start + sbl_log_size) > sbl_log_def_end) {
		cnss_pr_err("Invalid SBL log data\n");
		return;
	}

	ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
	if (CNSS_MHI_IN_MISSION_MODE(ee)) {
		cnss_pr_err("Avoid dumping SBL log data in mission mode\n");
		return;
	}

	cnss_pr_dbg("Dumping SBL log data\n");
	for (i = 0; i < sbl_log_size; i += sizeof(val)) {
		mem_addr = sbl_log_start + i;
		if (cnss_pci_reg_read(pci_priv, mem_addr, &val))
			break;
		cnss_pr_dbg("SRAM[0x%x] = 0x%x\n", mem_addr, val);
	}
}

#ifdef CONFIG_DISABLE_CNSS_SRAM_DUMP
static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
{
}
#else
static void cnss_pci_dump_sram(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	u32 i, mem_addr;
	u32 *dump_ptr;

	plat_priv = pci_priv->plat_priv;

	if (plat_priv->device_id != QCA6490_DEVICE_ID ||
	    cnss_get_host_build_type() != QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		return;

	if (!plat_priv->sram_dump) {
		cnss_pr_err("SRAM dump memory is not allocated\n");
		return;
	}

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pr_dbg("Dumping SRAM at 0x%lx\n", plat_priv->sram_dump);

	for (i = 0; i < SRAM_DUMP_SIZE; i += sizeof(u32)) {
		mem_addr = SRAM_START + i;
		dump_ptr = (u32 *)(plat_priv->sram_dump + i);
		if (cnss_pci_reg_read(pci_priv, mem_addr, dump_ptr)) {
			cnss_pr_err("SRAM dump failed at 0x%x\n", mem_addr);
			break;
		}
		/* Relinquish the CPU after dumping each 256KB chunk */
		if (!(i % CNSS_256KB_SIZE))
			cond_resched();
	}
}
#endif

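/**
 * cnss_pci_handle_mhi_poweron_timeout - Handle MHI power-on timeout
 * @pci_priv: driver PCI bus context pointer
 *
 * If the RDDM cookie is set or the device SOL GPIO is high, arm the RDDM
 * timer and wait for the dump. Otherwise dump MHI/BHI/SOC scratch
 * registers and the PBL/SBL SRAM logs and report a timeout.
 *
 * Return: 0 if RDDM is awaited, -ETIMEDOUT otherwise
 */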
static int cnss_pci_handle_mhi_poweron_timeout(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	cnss_fatal_err("MHI power up timed out\n");

	if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE) ||
	    cnss_get_dev_sol_value(plat_priv) > 0) {
		/* Wait for RDDM if the RDDM cookie is set or the device SOL
		 * GPIO is high. If RDDM times out, the PBL/SBL error region
		 * may have been erased, so there is no need to dump it
		 * either.
		 */
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
		    !pci_priv->pci_link_down_ind) {
			mod_timer(&pci_priv->dev_rddm_timer,
				  jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
		}
	} else {
		cnss_pr_dbg("RDDM cookie is not set and device SOL is low\n");
		cnss_mhi_debug_reg_dump(pci_priv);
		cnss_pci_bhi_debug_reg_dump(pci_priv);
		cnss_pci_soc_scratch_reg_dump(pci_priv);
		/* Dump the PBL/SBL error log if the RDDM cookie is not set */
		cnss_pci_dump_bl_sram_mem(pci_priv);
		cnss_pci_dump_sram(pci_priv);
		return -ETIMEDOUT;
	}

	return 0;
}

static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		return "INIT";
	case CNSS_MHI_DEINIT:
		return "DEINIT";
	case CNSS_MHI_POWER_ON:
		return "POWER_ON";
	case CNSS_MHI_POWERING_OFF:
		return "POWERING_OFF";
	case CNSS_MHI_POWER_OFF:
		return "POWER_OFF";
	case CNSS_MHI_FORCE_POWER_OFF:
		return "FORCE_POWER_OFF";
	case CNSS_MHI_SUSPEND:
		return "SUSPEND";
	case CNSS_MHI_RESUME:
		return "RESUME";
	case CNSS_MHI_TRIGGER_RDDM:
		return "TRIGGER_RDDM";
	case CNSS_MHI_RDDM_DONE:
		return "RDDM_DONE";
	default:
		return "UNKNOWN";
	}
}

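/**
 * cnss_pci_check_mhi_state_bit - Validate an MHI state transition
 * @pci_priv: driver PCI bus context pointer
 * @mhi_state: target MHI state
 *
 * Check the driver's internal MHI state bits to ensure the requested
 * transition is legal from the current state; assert on an illegal
 * transition unless the target is TRIGGER_RDDM.
 *
 * Return: 0 if the transition is allowed, -EINVAL otherwise
 */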
static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
					enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_DEINIT:
	case CNSS_MHI_POWER_ON:
		if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_SUSPEND:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RESUME:
		if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
		    !test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
			return 0;
		break;
	case CNSS_MHI_RDDM_DONE:
		return 0;
	default:
		cnss_pr_err("Unhandled MHI state: %s(%d)\n",
			    cnss_mhi_state_to_str(mhi_state), mhi_state);
	}

	cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state,
		    pci_priv->mhi_state);
	if (mhi_state != CNSS_MHI_TRIGGER_RDDM)
		CNSS_ASSERT(0);

	return -EINVAL;
}

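/**
 * cnss_rddm_trigger_debug - Leave a breadcrumb before forcing RDDM
 * @pci_priv: driver PCI bus context pointer
 *
 * QCA6490 only: write the 0xACE55 pattern into a GCC spare register and
 * read it back, so that a later check (see cnss_rddm_trigger_check())
 * can tell whether the RDDM trigger path was taken before a warm reset.
 *
 * Return: 0 on success, negative errno on failure
 */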
static int cnss_rddm_trigger_debug(struct cnss_pci_data *pci_priv)
{
	int read_val, ret;

	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
		return -EOPNOTSUPP;

	if (cnss_pci_check_link_status(pci_priv))
		return -EINVAL;

	cnss_pr_err("Write GCC spare with ACE55 pattern\n");
	cnss_pci_reg_write(pci_priv, GCC_GCC_SPARE_REG_1, 0xACE55);
	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
	cnss_pr_err("Read back GCC spare: 0x%x, ret: %d\n", read_val, ret);
	ret = cnss_pci_reg_read(pci_priv, GCC_PRE_ARES_DEBUG_TIMER_VAL,
				&read_val);
	cnss_pr_err("Warm reset allowed check: 0x%x, ret: %d\n", read_val, ret);
	return ret;
}

static int cnss_rddm_trigger_check(struct cnss_pci_data *pci_priv)
{
	int read_val, ret;
	u32 pbl_stage, sbl_log_start, sbl_log_size, pbl_wlan_boot_cfg;

	if (!pci_priv || pci_priv->device_id != QCA6490_DEVICE_ID)
		return -EOPNOTSUPP;

	if (cnss_pci_check_link_status(pci_priv))
		return -EINVAL;

	ret = cnss_pci_reg_read(pci_priv, GCC_GCC_SPARE_REG_1, &read_val);
	cnss_pr_err("Read GCC spare to check reset status: 0x%x, ret: %d\n",
		    read_val, ret);

	cnss_pci_reg_read(pci_priv, TCSR_PBL_LOGGING_REG, &pbl_stage);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG2_REG, &sbl_log_start);
	cnss_pci_reg_read(pci_priv, PCIE_BHI_ERRDBG3_REG, &sbl_log_size);
	cnss_pci_reg_read(pci_priv, PBL_WLAN_BOOT_CFG, &pbl_wlan_boot_cfg);
	cnss_pr_dbg("TCSR_PBL_LOGGING: 0x%08x PCIE_BHI_ERRDBG: Start: 0x%08x Size: 0x%08x\n",
		    pbl_stage, sbl_log_start, sbl_log_size);
	cnss_pr_dbg("PBL_WLAN_BOOT_CFG: 0x%08x\n", pbl_wlan_boot_cfg);

	return ret;
}

static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
				       enum cnss_mhi_state mhi_state)
{
	switch (mhi_state) {
	case CNSS_MHI_INIT:
		set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_DEINIT:
		clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_ON:
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWERING_OFF:
		set_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_POWER_OFF:
	case CNSS_MHI_FORCE_POWER_OFF:
		clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
		clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_SUSPEND:
		set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_RESUME:
		clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		set_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state);
		break;
	case CNSS_MHI_RDDM_DONE:
		set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
	}
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
{
	return mhi_pm_resume_force(pci_priv->mhi_ctrl);
}
#else
static int cnss_mhi_pm_force_resume(struct cnss_pci_data *pci_priv)
{
	return mhi_pm_resume(pci_priv->mhi_ctrl);
}
#endif

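/**
 * cnss_pci_set_mhi_state - Drive the MHI layer into the requested state
 * @pci_priv: driver PCI bus context pointer
 * @mhi_state: target MHI state
 *
 * Validate the transition, invoke the matching MHI bus operation (power
 * up/down, suspend/resume, RDDM trigger) with retries for busy suspends,
 * and update the driver's internal MHI state bits on success. No-op for
 * QCA6174, which does not use MHI.
 *
 * Return: 0 on success, negative errno on failure
 */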
static int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
				  enum cnss_mhi_state mhi_state)
{
	int ret = 0, retry = 0;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return 0;

	if (mhi_state < 0) {
		cnss_pr_err("Invalid MHI state (%d)\n", mhi_state);
		return -EINVAL;
	}

	ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
	if (ret)
		goto out;

	cnss_pr_vdbg("Setting MHI state: %s(%d)\n",
		     cnss_mhi_state_to_str(mhi_state), mhi_state);

	switch (mhi_state) {
	case CNSS_MHI_INIT:
		ret = mhi_prepare_for_power_up(pci_priv->mhi_ctrl);
		break;
	case CNSS_MHI_DEINIT:
		mhi_unprepare_after_power_down(pci_priv->mhi_ctrl);
		ret = 0;
		break;
	case CNSS_MHI_POWER_ON:
		ret = mhi_sync_power_up(pci_priv->mhi_ctrl);
#if IS_ENABLED(CONFIG_MHI_BUS_MISC)
		/* Only set img_pre_alloc when power up succeeds */
		if (!ret && !pci_priv->mhi_ctrl->img_pre_alloc) {
			cnss_pr_dbg("Notify MHI to use already allocated images\n");
			pci_priv->mhi_ctrl->img_pre_alloc = true;
		}
#endif
		break;
	case CNSS_MHI_POWER_OFF:
		mhi_power_down(pci_priv->mhi_ctrl, true);
		ret = 0;
		break;
	case CNSS_MHI_FORCE_POWER_OFF:
		mhi_power_down(pci_priv->mhi_ctrl, false);
		ret = 0;
		break;
	case CNSS_MHI_SUSPEND:
retry_mhi_suspend:
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last)
			ret = cnss_mhi_pm_fast_suspend(pci_priv, true);
		else
			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		if (ret == -EBUSY && retry++ < MHI_SUSPEND_RETRY_MAX_TIMES) {
			cnss_pr_vdbg("Retry MHI suspend #%d\n", retry);
			usleep_range(MHI_SUSPEND_RETRY_DELAY_US,
				     MHI_SUSPEND_RETRY_DELAY_US + 1000);
			goto retry_mhi_suspend;
		}
		break;
	case CNSS_MHI_RESUME:
		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
		if (pci_priv->drv_connected_last) {
			ret = cnss_pci_prevent_l1(&pci_priv->pci_dev->dev);
			if (ret) {
				mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
				break;
			}
			ret = cnss_mhi_pm_fast_resume(pci_priv, true);
			cnss_pci_allow_l1(&pci_priv->pci_dev->dev);
		} else {
			if (pci_priv->device_id == QCA6390_DEVICE_ID)
				ret = cnss_mhi_pm_force_resume(pci_priv);
			else
				ret = mhi_pm_resume(pci_priv->mhi_ctrl);
		}
		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
		break;
	case CNSS_MHI_TRIGGER_RDDM:
		cnss_rddm_trigger_debug(pci_priv);
		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
		if (ret) {
			cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
			cnss_rddm_trigger_check(pci_priv);
		}
		break;
	case CNSS_MHI_RDDM_DONE:
		break;
	default:
		cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
		ret = -EINVAL;
	}

	if (ret)
		goto out;

	cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);

	return 0;

out:
	cnss_pr_err("Failed to set MHI state: %s(%d), err = %d\n",
		    cnss_mhi_state_to_str(mhi_state), mhi_state, ret);
	return ret;
}

static int cnss_pci_config_msi_addr(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_plat_data *plat_priv;

	if (!pci_dev)
		return -ENODEV;

	if (!pci_dev->msix_enabled)
		return ret;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return -ENODEV;
	}

	ret = of_property_read_u32(plat_priv->plat_dev->dev.of_node,
				   "msix-match-addr",
				   &pci_priv->msix_addr);
	cnss_pr_dbg("MSI-X match address is 0x%X\n",
		    pci_priv->msix_addr);

	return ret;
}

static int cnss_pci_config_msi_data(struct cnss_pci_data *pci_priv)
{
	struct msi_desc *msi_desc;
	struct cnss_msi_config *msi_config;
	struct pci_dev *pci_dev = pci_priv->pci_dev;

	msi_config = pci_priv->msi_config;

	if (pci_dev->msix_enabled) {
		pci_priv->msi_ep_base_data = msi_config->users[0].base_vector;
		cnss_pr_dbg("MSI-X base data is %d\n",
			    pci_priv->msi_ep_base_data);
		return 0;
	}

	msi_desc = irq_get_msi_desc(pci_dev->irq);
	if (!msi_desc) {
		cnss_pr_err("msi_desc is NULL!\n");
		return -EINVAL;
	}

	pci_priv->msi_ep_base_data = msi_desc->msg.data;
	cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);

	return 0;
}

#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
#define PLC_PCIE_NAME_LEN 14

static struct cnss_plat_data *
cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
{
	int plat_env_count = cnss_get_max_plat_env_count();
	struct cnss_plat_data *plat_env;
	struct cnss_pci_data *pci_priv;
	int i = 0;

	if (!driver_ops) {
		cnss_pr_err("No cnss driver\n");
		return NULL;
	}

	for (i = 0; i < plat_env_count; i++) {
		plat_env = cnss_get_plat_env(i);
		if (!plat_env)
			continue;
		if (driver_ops->name && plat_env->pld_bus_ops_name) {
			/* driver_ops->name = PLD_PCIE_OPS_NAME
			 * #ifdef MULTI_IF_NAME
			 * #define PLD_PCIE_OPS_NAME "pld_pcie_" MULTI_IF_NAME
			 * #else
			 * #define PLD_PCIE_OPS_NAME "pld_pcie"
			 * #endif
			 */
			if (memcmp(driver_ops->name,
				   plat_env->pld_bus_ops_name,
				   PLC_PCIE_NAME_LEN) == 0)
				return plat_env;
		}
	}

	cnss_pr_vdbg("Invalid cnss driver name from ko %s\n", driver_ops->name);
	/* In the dual WLAN card case, the pld_bus_ops_name from DT and the
	 * driver_ops->name from the ko must match, otherwise the WLAN host
	 * driver does not know which plat_env to use; if no match is found,
	 * fall back to the first available instance instead.
	 */
	for (i = 0; i < plat_env_count; i++) {
		plat_env = cnss_get_plat_env(i);

		if (!plat_env)
			continue;

		pci_priv = plat_env->bus_priv;
		if (!pci_priv) {
			cnss_pr_err("pci_priv is NULL\n");
			continue;
		}

		if (driver_ops == pci_priv->driver_ops)
			return plat_env;
	}
	/* No existing instance was found, so return the first empty one */
	for (i = 0; i < plat_env_count; i++) {
		plat_env = cnss_get_plat_env(i);

		if (!plat_env)
			continue;
		pci_priv = plat_env->bus_priv;
		if (!pci_priv) {
			cnss_pr_err("pci_priv is NULL\n");
			continue;
		}

		if (!pci_priv->driver_ops)
			return plat_env;
	}

	return NULL;
}

static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	u32 scratch = QCA6390_PCIE_SOC_PCIE_REG_PCIE_SCRATCH_2_SOC_PCIE_REG;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	/* In the single WLAN chipset case, plat_priv->qrtr_node_id is always
	 * 0 and WLAN FW uses the hardcoded qrtr node id 7. In the dual
	 * Hastings case, the qrtr node id is read from device tree into
	 * plat_priv->qrtr_node_id, which is always non-zero, and stored in a
	 * PCIe register; WLAN FW reads it back from that register and
	 * overrides the hardcoded one while initializing the IPC router.
	 * Without this, two Hastings chips would use the same qrtr node
	 * instance id, which messes up QMI message exchange. Per the qrtr
	 * spec, every node must have a unique qrtr node id.
	 */
	if (plat_priv->device_id == QCA6390_DEVICE_ID &&
	    plat_priv->qrtr_node_id) {
		u32 val;

		cnss_pr_dbg("write 0x%x to SCRATCH REG\n",
			    plat_priv->qrtr_node_id);
		ret = cnss_pci_reg_write(pci_priv, scratch,
					 plat_priv->qrtr_node_id);
		if (ret) {
			cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
				    scratch, ret);
			goto out;
		}

		ret = cnss_pci_reg_read(pci_priv, scratch, &val);
		if (ret) {
			cnss_pr_err("Failed to read SCRATCH REG\n");
			goto out;
		}

		if (val != plat_priv->qrtr_node_id) {
			cnss_pr_err("qrtr node id written to register doesn't match the readout value\n");
			return -ERANGE;
		}
	}
out:
	return ret;
}
#else
static struct cnss_plat_data *
cnss_get_plat_priv_by_driver_ops(struct cnss_wlan_driver *driver_ops)
{
	return cnss_bus_dev_to_plat_priv(NULL);
}

static int cnss_pci_store_qrtr_node_id(struct cnss_pci_data *pci_priv)
{
	return 0;
}
#endif

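/**
 * cnss_pci_start_mhi - Initialize and power on MHI for device boot
 * @pci_priv: driver PCI bus context pointer
 *
 * Apply any timeout overrides, prepare MHI, store the qrtr node id for
 * the dual-card case, then power on MHI with the boot debug timer armed.
 * A power-on timeout is handed to the RDDM/timeout handler; on success,
 * MSI data is refreshed for the one-MSI case.
 *
 * Return: 0 on success, negative errno on failure
 */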
int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	unsigned int timeout = 0;
	int retry = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	plat_priv = pci_priv->plat_priv;
	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return 0;

	if (MHI_TIMEOUT_OVERWRITE_MS)
		pci_priv->mhi_ctrl->timeout_ms = MHI_TIMEOUT_OVERWRITE_MS;
	cnss_mhi_set_m2_timeout_ms(pci_priv, MHI_M2_TIMEOUT_MS);

	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
	if (ret)
		return ret;

	timeout = pci_priv->mhi_ctrl->timeout_ms;
	/* For non-perf builds the timeout is 6x the default of 10 seconds */
	if (cnss_get_host_build_type() == QMI_HOST_BUILD_TYPE_PRIMARY_V01)
		pci_priv->mhi_ctrl->timeout_ms *= 6;
	else /* For perf builds the timeout is 3x the default of 10 seconds */
		pci_priv->mhi_ctrl->timeout_ms *= 3;

retry:
	ret = cnss_pci_store_qrtr_node_id(pci_priv);
	if (ret) {
		if (retry++ < REG_RETRY_MAX_TIMES)
			goto retry;
		else
			return ret;
	}

	/* Start the timer to dump MHI/PBL/SBL debug data periodically */
	mod_timer(&pci_priv->boot_debug_timer,
		  jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
	ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
	del_timer_sync(&pci_priv->boot_debug_timer);
	if (ret == 0)
		cnss_wlan_adsp_pc_enable(pci_priv, false);

	pci_priv->mhi_ctrl->timeout_ms = timeout;

	if (ret == -ETIMEDOUT) {
		/* Special case: if MHI power on returns -ETIMEDOUT, the
		 * controller still needs to clean up by calling MHI power
		 * down. Force-set the bit in the driver's internal MHI
		 * state so that this can be handled properly later.
		 */
		set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
		ret = cnss_pci_handle_mhi_poweron_timeout(pci_priv);
	} else if (!ret) {
		/* The kernel may allocate a dummy MSI vector before
		 * request_irq() and then a real one when request_irq() is
		 * called, so fetch msi_data again here to avoid spurious
		 * interrupts, as msi_data will be configured to the SRNGs.
		 */
		if (cnss_pci_is_one_msi(pci_priv))
			ret = cnss_pci_config_msi_data(pci_priv);
	}

	return ret;
}

static void cnss_pci_power_off_mhi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return;

	if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state)) {
		cnss_pr_dbg("MHI is already powered off\n");
		return;
	}
	cnss_wlan_adsp_pc_enable(pci_priv, true);
	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
	cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_POWERING_OFF);

	if (!pci_priv->pci_link_down_ind)
		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
	else
		cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_FORCE_POWER_OFF);
}

static void cnss_pci_deinit_mhi(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(FBC_BYPASS, &plat_priv->ctrl_params.quirks))
		return;

	if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state)) {
		cnss_pr_dbg("MHI is already deinited\n");
		return;
	}

	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
}

static void cnss_pci_set_wlaon_pwr_ctrl(struct cnss_pci_data *pci_priv,
					bool set_vddd4blow, bool set_shutdown,
					bool do_force_wake)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int ret;
	u32 val;

	if (!plat_priv->set_wlaon_pwr_ctrl)
		return;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN ||
	    pci_priv->pci_link_down_ind)
		return;

	if (do_force_wake)
		if (cnss_pci_force_wake_get(pci_priv))
			return;

	ret = cnss_pci_reg_read(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, &val);
	if (ret) {
		cnss_pr_err("Failed to read register offset 0x%x, err = %d\n",
			    WLAON_QFPROM_PWR_CTRL_REG, ret);
		goto force_wake_put;
	}

	cnss_pr_dbg("Read register offset 0x%x, val = 0x%x\n",
		    WLAON_QFPROM_PWR_CTRL_REG, val);

	if (set_vddd4blow)
		val |= QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;
	else
		val &= ~QFPROM_PWR_CTRL_VDD4BLOW_SW_EN_MASK;

	if (set_shutdown)
		val |= QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;
	else
		val &= ~QFPROM_PWR_CTRL_SHUTDOWN_EN_MASK;

	ret = cnss_pci_reg_write(pci_priv, WLAON_QFPROM_PWR_CTRL_REG, val);
	if (ret) {
		cnss_pr_err("Failed to write register offset 0x%x, err = %d\n",
			    WLAON_QFPROM_PWR_CTRL_REG, ret);
		goto force_wake_put;
	}

	cnss_pr_dbg("Write val 0x%x to register offset 0x%x\n", val,
		    WLAON_QFPROM_PWR_CTRL_REG);

	if (set_shutdown)
		usleep_range(WLAON_PWR_CTRL_SHUTDOWN_DELAY_MIN_US,
			     WLAON_PWR_CTRL_SHUTDOWN_DELAY_MAX_US);

force_wake_put:
	if (do_force_wake)
		cnss_pci_force_wake_put(pci_priv);
}

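/**
 * cnss_pci_get_device_timestamp - Read the device time counter in us
 * @pci_priv: driver PCI bus context pointer
 * @time_us: output buffer for the device timestamp in microseconds
 *
 * Read the 64-bit tick counter (MHI time registers on KIWI/MANGO/PEACH,
 * WLAON global counter otherwise) and convert ticks to microseconds:
 * time_us = ticks / (freq_hz / 100000) * 10, i.e. ticks * 10^6 / freq_hz,
 * split this way to keep the do_div() divisor a 32-bit value.
 *
 * Return: 0 on success, -EINVAL if the device clock frequency is unset
 */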
static int cnss_pci_get_device_timestamp(struct cnss_pci_data *pci_priv,
					 u64 *time_us)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	u32 low, high;
	u64 device_ticks;

	if (!plat_priv->device_freq_hz) {
		cnss_pr_err("Device time clock frequency is not valid\n");
		return -EINVAL;
	}

	switch (pci_priv->device_id) {
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_LOW, &low);
		cnss_pci_reg_read(pci_priv, PCIE_MHI_TIME_HIGH, &high);
		break;
	default:
		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL3, &low);
		cnss_pci_reg_read(pci_priv, WLAON_GLOBAL_COUNTER_CTRL4, &high);
		break;
	}

	device_ticks = (u64)high << 32 | low;
	do_div(device_ticks, plat_priv->device_freq_hz / 100000);
	*time_us = device_ticks * 10;

	return 0;
}

static void cnss_pci_enable_time_sync_counter(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		return;
	default:
		break;
	}

	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
			   TIME_SYNC_ENABLE);
}

static void cnss_pci_clear_time_sync_counter(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		return;
	default:
		break;
	}

	cnss_pci_reg_write(pci_priv, WLAON_GLOBAL_COUNTER_CTRL5,
			   TIME_SYNC_CLEAR);
}

static void cnss_pci_time_sync_reg_update(struct cnss_pci_data *pci_priv,
					  u32 low, u32 high)
{
	u32 time_reg_low;
	u32 time_reg_high;

	switch (pci_priv->device_id) {
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		/* Use the next two shadow registers after host's usage */
		time_reg_low = PCIE_SHADOW_REG_VALUE_0 +
			       (pci_priv->plat_priv->num_shadow_regs_v3 *
				SHADOW_REG_LEN_BYTES);
		time_reg_high = time_reg_low + SHADOW_REG_LEN_BYTES;
		break;
	default:
		time_reg_low = PCIE_SHADOW_REG_VALUE_34;
		time_reg_high = PCIE_SHADOW_REG_VALUE_35;
		break;
	}

	cnss_pci_reg_write(pci_priv, time_reg_low, low);
	cnss_pci_reg_write(pci_priv, time_reg_high, high);

	cnss_pci_reg_read(pci_priv, time_reg_low, &low);
	cnss_pci_reg_read(pci_priv, time_reg_high, &high);

	cnss_pr_dbg("Updated time sync regs [0x%x] = 0x%x, [0x%x] = 0x%x\n",
		    time_reg_low, low, time_reg_high, high);
}

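/**
 * cnss_pci_update_timestamp - Push the host/device time offset to device
 * @pci_priv: driver PCI bus context pointer
 *
 * With L1 prevented and force wake held, capture host and device
 * timestamps under the time sync lock, compute offset = host - device in
 * microseconds, and write the 64-bit offset into the time sync shadow
 * registers.
 *
 * Return: 0 on success, negative errno on failure
 */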
static int cnss_pci_update_timestamp(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &pci_priv->pci_dev->dev;
	unsigned long flags = 0;
	u64 host_time_us, device_time_us, offset;
	u32 low, high;
	int ret;

	ret = cnss_pci_prevent_l1(dev);
	if (ret)
		goto out;

	ret = cnss_pci_force_wake_get(pci_priv);
	if (ret)
		goto allow_l1;

	spin_lock_irqsave(&time_sync_lock, flags);
	cnss_pci_clear_time_sync_counter(pci_priv);
	cnss_pci_enable_time_sync_counter(pci_priv);
	host_time_us = cnss_get_host_timestamp(plat_priv);
	ret = cnss_pci_get_device_timestamp(pci_priv, &device_time_us);
	cnss_pci_clear_time_sync_counter(pci_priv);
	spin_unlock_irqrestore(&time_sync_lock, flags);
	if (ret)
		goto force_wake_put;

	if (host_time_us < device_time_us) {
		cnss_pr_err("Host time (%llu us) is smaller than device time (%llu us), stop\n",
			    host_time_us, device_time_us);
		ret = -EINVAL;
		goto force_wake_put;
	}

	offset = host_time_us - device_time_us;
	cnss_pr_dbg("Host time = %llu us, device time = %llu us, offset = %llu us\n",
		    host_time_us, device_time_us, offset);

	low = offset & 0xFFFFFFFF;
	high = offset >> 32;

	cnss_pci_time_sync_reg_update(pci_priv, low, high);

force_wake_put:
	cnss_pci_force_wake_put(pci_priv);
allow_l1:
	cnss_pci_allow_l1(dev);
out:
	return ret;
}

static void cnss_pci_time_sync_work_hdlr(struct work_struct *work)
{
	struct cnss_pci_data *pci_priv =
		container_of(work, struct cnss_pci_data, time_sync_work.work);
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int time_sync_period_ms =
		plat_priv->ctrl_params.time_sync_period;

	if (test_bit(DISABLE_TIME_SYNC, &plat_priv->ctrl_params.quirks)) {
		cnss_pr_dbg("Time sync is disabled\n");
		return;
	}

	if (!time_sync_period_ms) {
		cnss_pr_dbg("Skip time sync as time period is 0\n");
		return;
	}

	if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
		return;

	if (cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS) < 0)
		goto runtime_pm_put;

	mutex_lock(&pci_priv->bus_lock);
	cnss_pci_update_timestamp(pci_priv);
	mutex_unlock(&pci_priv->bus_lock);
	schedule_delayed_work(&pci_priv->time_sync_work,
			      msecs_to_jiffies(time_sync_period_ms));

runtime_pm_put:
	cnss_pci_pm_runtime_mark_last_busy(pci_priv);
	cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
}

static int cnss_pci_start_time_sync_update(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!plat_priv->device_freq_hz) {
		cnss_pr_dbg("Device time clock frequency is not valid, skip time sync\n");
		return -EINVAL;
	}

	cnss_pci_time_sync_work_hdlr(&pci_priv->time_sync_work.work);

	return 0;
}

static void cnss_pci_stop_time_sync_update(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return;
	}

	cancel_delayed_work_sync(&pci_priv->time_sync_work);
}

int cnss_pci_set_therm_cdev_state(struct cnss_pci_data *pci_priv,
				  unsigned long thermal_state,
				  int tcdev_id)
{
	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL!\n");
		return -ENODEV;
	}

	if (!pci_priv->driver_ops || !pci_priv->driver_ops->set_therm_cdev_state) {
		cnss_pr_err("driver_ops or set_therm_cdev_state is NULL\n");
		return -EINVAL;
	}

	return pci_priv->driver_ops->set_therm_cdev_state(pci_priv->pci_dev,
							  thermal_state,
							  tcdev_id);
}

int cnss_pci_update_time_sync_period(struct cnss_pci_data *pci_priv,
				     unsigned int time_sync_period)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	cnss_pci_stop_time_sync_update(pci_priv);
	plat_priv->ctrl_params.time_sync_period = time_sync_period;
	cnss_pci_start_time_sync_update(pci_priv);
	cnss_pr_dbg("WLAN time sync period %u ms\n",
		    plat_priv->ctrl_params.time_sync_period);

	return 0;
}

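/**
 * cnss_pci_call_driver_probe - Invoke the host driver's probe-type callback
 * @pci_priv: driver PCI bus context pointer
 *
 * Depending on the driver state, call reinit() for recovery, probe() for
 * initial load, or idle_restart() for idle restart, completing the
 * matching completion objects, then kick off periodic time sync.
 *
 * Return: 0 on success, negative errno on failure
 */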
int cnss_pci_call_driver_probe(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_err("Reboot is in progress, skip driver probe\n");
		return -EINVAL;
	}

	if (test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		cnss_pr_dbg("Skip driver probe\n");
		goto out;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		ret = -EINVAL;
		goto out;
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->reinit(pci_priv->pci_dev,
						   pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to reinit host driver, err = %d\n",
				    ret);
			goto out;
		}
		complete(&plat_priv->recovery_complete);
	} else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->probe(pci_priv->pci_dev,
						  pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to probe host driver, err = %d\n",
				    ret);
			complete_all(&plat_priv->power_up_complete);
			goto out;
		}
		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
		set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
		cnss_pci_free_blob_mem(pci_priv);
		complete_all(&plat_priv->power_up_complete);
	} else if (test_bit(CNSS_DRIVER_IDLE_RESTART,
			    &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->idle_restart(pci_priv->pci_dev,
							 pci_priv->pci_device_id);
		if (ret) {
			cnss_pr_err("Failed to idle restart host driver, err = %d\n",
				    ret);
			plat_priv->power_up_error = ret;
			complete_all(&plat_priv->power_up_complete);
			goto out;
		}
		clear_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state);
		complete_all(&plat_priv->power_up_complete);
	} else {
		complete(&plat_priv->power_up_complete);
	}

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		__pm_relax(plat_priv->recovery_ws);
	}

	cnss_pci_start_time_sync_update(pci_priv);

	return 0;

out:
	return ret;
}

int cnss_pci_call_driver_remove(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;
	int ret;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;

	if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) ||
	    test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state) ||
	    test_bit(CNSS_DRIVER_DEBUG, &plat_priv->driver_state)) {
		cnss_pr_dbg("Skip driver remove\n");
		return 0;
	}

	if (!pci_priv->driver_ops) {
		cnss_pr_err("driver_ops is NULL\n");
		return -EINVAL;
	}

	cnss_pci_stop_time_sync_update(pci_priv);

	if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state)) {
		complete(&plat_priv->rddm_complete);
		pci_priv->driver_ops->shutdown(pci_priv->pci_dev);
	} else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
		pci_priv->driver_ops->remove(pci_priv->pci_dev);
		clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
	} else if (test_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
			    &plat_priv->driver_state)) {
		ret = pci_priv->driver_ops->idle_shutdown(pci_priv->pci_dev);
		if (ret == -EAGAIN) {
			clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN,
				  &plat_priv->driver_state);
			return ret;
		}
	}

	plat_priv->get_info_cb_ctx = NULL;
	plat_priv->get_info_cb = NULL;
	plat_priv->get_driver_async_data_ctx = NULL;
	plat_priv->get_driver_async_data_cb = NULL;

	return 0;
}

int cnss_pci_call_driver_modem_status(struct cnss_pci_data *pci_priv,
				      int modem_current_status)
{
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -ENODEV;

	driver_ops = pci_priv->driver_ops;
	if (!driver_ops || !driver_ops->modem_status)
		return -EINVAL;

	driver_ops->modem_status(pci_priv->pci_dev, modem_current_status);

	return 0;
}

int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
			   enum cnss_driver_status status)
{
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -ENODEV;

	driver_ops = pci_priv->driver_ops;
	if (!driver_ops || !driver_ops->update_status)
		return -EINVAL;

	cnss_pr_dbg("Update driver status: %d\n", status);

	driver_ops->update_status(pci_priv->pci_dev, status);

	return 0;
}

static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
				   struct cnss_misc_reg *misc_reg,
				   u32 misc_reg_size,
				   char *reg_name)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	bool do_force_wake_put = true;
	int i;

	if (!misc_reg)
		return;

	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	if (cnss_pci_force_wake_get(pci_priv)) {
		/* Continue to dump when device has entered RDDM already */
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
			return;
		do_force_wake_put = false;
	}

	cnss_pr_dbg("Start to dump %s registers\n", reg_name);

	for (i = 0; i < misc_reg_size; i++) {
		if (!test_bit(pci_priv->misc_reg_dev_mask,
			      &misc_reg[i].dev_mask))
			continue;

		if (misc_reg[i].wr) {
			if (misc_reg[i].offset ==
			    QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
			    i >= 1)
				misc_reg[i].val =
					QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
					misc_reg[i - 1].val;
			if (cnss_pci_reg_write(pci_priv,
					       misc_reg[i].offset,
					       misc_reg[i].val))
				goto force_wake_put;
			cnss_pr_vdbg("Write 0x%X to 0x%X\n",
				     misc_reg[i].val,
				     misc_reg[i].offset);

		} else {
			if (cnss_pci_reg_read(pci_priv,
					      misc_reg[i].offset,
					      &misc_reg[i].val))
				goto force_wake_put;
		}
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
}

static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
{
	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
			       WCSS_REG_SIZE, "wcss");
	cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
			       PCIE_REG_SIZE, "pcie");
	cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
			       WLAON_REG_SIZE, "wlaon");
	cnss_pci_misc_reg_dump(pci_priv, pci_priv->syspm_reg,
			       SYSPM_REG_SIZE, "syspm");
}

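/**
 * cnss_pci_dump_shadow_reg - Snapshot PCIe shadow registers for debug
 * @pci_priv: driver PCI bus context pointer
 *
 * Allocate a debug buffer on first use, then read all shadow and
 * intermediate shadow registers into it, holding force wake when it can
 * be acquired. Not usable from interrupt context.
 *
 * Return: None
 */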
static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
{
	int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
	u32 reg_offset;
	bool do_force_wake_put = true;

	if (in_interrupt() || irqs_disabled())
		return;

	if (cnss_pci_check_link_status(pci_priv))
		return;

	if (!pci_priv->debug_reg) {
		pci_priv->debug_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
						   sizeof(*pci_priv->debug_reg)
						   * array_size, GFP_KERNEL);
		if (!pci_priv->debug_reg)
			return;
	}

	if (cnss_pci_force_wake_get(pci_priv))
		do_force_wake_put = false;

	cnss_pr_dbg("Start to dump shadow registers\n");

	for (i = 0; i < SHADOW_REG_COUNT; i++, j++) {
		reg_offset = PCIE_SHADOW_REG_VALUE_0 + i * 4;
		pci_priv->debug_reg[j].offset = reg_offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset,
				      &pci_priv->debug_reg[j].val))
			goto force_wake_put;
	}

	for (i = 0; i < SHADOW_REG_INTER_COUNT; i++, j++) {
		reg_offset = PCIE_SHADOW_REG_INTER_0 + i * 4;
		pci_priv->debug_reg[j].offset = reg_offset;
		if (cnss_pci_reg_read(pci_priv, reg_offset,
				      &pci_priv->debug_reg[j].val))
			goto force_wake_put;
	}

force_wake_put:
	if (do_force_wake_put)
		cnss_pci_force_wake_put(pci_priv);
}

static int cnss_qca6174_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	ret = cnss_power_on_device(plat_priv, false);
	if (ret) {
		cnss_pr_err("Failed to power on device, err = %d\n", ret);
		goto out;
	}

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		goto power_off;
	}

	ret = cnss_pci_call_driver_probe(pci_priv);
	if (ret)
		goto suspend_link;

	return 0;
suspend_link:
	cnss_suspend_pci_link(pci_priv);
power_off:
	cnss_power_off_device(plat_priv);
out:
	return ret;
}

static int cnss_qca6174_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	cnss_pci_pm_runtime_resume(pci_priv);

	ret = cnss_pci_call_driver_remove(pci_priv);
	if (ret == -EAGAIN)
		goto out;

	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
				   CNSS_BUS_WIDTH_NONE);
	cnss_pci_set_monitor_wake_intr(pci_priv, false);
	cnss_pci_set_auto_suspended(pci_priv, 0);

	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	cnss_power_off_device(plat_priv);

	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);

out:
	return ret;
}

static void cnss_qca6174_crash_shutdown(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->driver_ops && pci_priv->driver_ops->crash_shutdown)
		pci_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
}

static int cnss_qca6174_ramdump(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_ramdump_info *ramdump_info;

	ramdump_info = &plat_priv->ramdump_info;
	if (!ramdump_info->ramdump_size)
		return -EINVAL;

	return cnss_do_ramdump(plat_priv);
}

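/**
 * cnss_qca6290_powerup - Power up and boot an MHI-based WLAN device
 * @pci_priv: driver PCI bus context pointer
 *
 * Clean up any stale RAM dump state, power on the device and resume the
 * PCIe link with retries (toggling BT_EN low on the last retry), start
 * MHI, and either probe the host driver directly (QMI bypass) or arm the
 * FW boot timer.
 *
 * Return: 0 on success, negative errno on failure
 */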
static int cnss_qca6290_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	unsigned int timeout;
	int retry = 0, bt_en_gpio = plat_priv->pinctrl_info.bt_en_gpio;
	int sw_ctrl_gpio = plat_priv->pinctrl_info.sw_ctrl_gpio;

	if (plat_priv->ramdump_info_v2.dump_data_valid) {
		cnss_pci_clear_dump_info(pci_priv);
		cnss_pci_power_off_mhi(pci_priv);
		cnss_suspend_pci_link(pci_priv);
		cnss_pci_deinit_mhi(pci_priv);
		cnss_power_off_device(plat_priv);
	}

	/* Clear QMI send usage count during every power up */
	pci_priv->qmi_send_usage_count = 0;

	plat_priv->power_up_error = 0;
retry:
	ret = cnss_power_on_device(plat_priv, false);
	if (ret) {
		cnss_pr_err("Failed to power on device, err = %d\n", ret);
		goto out;
	}

	ret = cnss_resume_pci_link(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
		cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
			    cnss_get_input_gpio_value(plat_priv, sw_ctrl_gpio));
		if (test_bit(IGNORE_PCI_LINK_FAILURE,
			     &plat_priv->ctrl_params.quirks)) {
			cnss_pr_dbg("Ignore PCI link resume failure\n");
			ret = 0;
			goto out;
		}
		if (ret == -EAGAIN && retry++ < POWER_ON_RETRY_MAX_TIMES) {
			cnss_power_off_device(plat_priv);
			/* Force toggle BT_EN GPIO low */
			if (retry == POWER_ON_RETRY_MAX_TIMES) {
				cnss_pr_dbg("Retry #%d. Set BT_EN GPIO(%u) low\n",
					    retry, bt_en_gpio);
				if (bt_en_gpio >= 0)
					gpio_direction_output(bt_en_gpio, 0);
				cnss_pr_dbg("BT_EN GPIO val: %d\n",
					    gpio_get_value(bt_en_gpio));
			}
			cnss_pr_dbg("Retry to resume PCI link #%d\n", retry);
			cnss_pr_dbg("Value of SW_CTRL GPIO: %d\n",
				    cnss_get_input_gpio_value(plat_priv,
							      sw_ctrl_gpio));
			msleep(POWER_ON_RETRY_DELAY_MS * retry);
			goto retry;
		}
		/* Assert when it reaches maximum retries */
		CNSS_ASSERT(0);
		goto power_off;
	}

	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false, false);
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_QMI);

	ret = cnss_pci_start_mhi(pci_priv);
	if (ret) {
		cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
		if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
		    !pci_priv->pci_link_down_ind && timeout) {
			/* Start recovery directly for MHI start failures */
			cnss_schedule_recovery(&pci_priv->pci_dev->dev,
					       CNSS_REASON_DEFAULT);
		}
		return 0;
	}

	if (test_bit(USE_CORE_ONLY_FW, &plat_priv->ctrl_params.quirks)) {
		clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
		clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
		return 0;
	}

	cnss_set_pin_connect_status(plat_priv);

	if (test_bit(QMI_BYPASS, &plat_priv->ctrl_params.quirks)) {
		ret = cnss_pci_call_driver_probe(pci_priv);
		if (ret)
			goto stop_mhi;
	} else if (timeout) {
		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state))
			timeout += WLAN_COLD_BOOT_CAL_TIMEOUT;
		else
			timeout += WLAN_MISSION_MODE_TIMEOUT;
		mod_timer(&plat_priv->fw_boot_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}

	return 0;

stop_mhi:
	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, true);
	cnss_pci_power_off_mhi(pci_priv);
	cnss_suspend_pci_link(pci_priv);
	cnss_pci_deinit_mhi(pci_priv);
power_off:
	cnss_power_off_device(plat_priv);
out:
	return ret;
}

static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	int do_force_wake = true;

	cnss_pci_pm_runtime_resume(pci_priv);

	ret = cnss_pci_call_driver_remove(pci_priv);
	if (ret == -EAGAIN)
		goto out;

	cnss_request_bus_bandwidth(&plat_priv->plat_dev->dev,
				   CNSS_BUS_WIDTH_NONE);
	cnss_pci_set_monitor_wake_intr(pci_priv, false);
	cnss_pci_set_auto_suspended(pci_priv, 0);

	if ((test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
	     test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
	     test_bit(CNSS_DRIVER_IDLE_RESTART, &plat_priv->driver_state) ||
	     test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state) ||
	     test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) &&
	    test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
		del_timer(&pci_priv->dev_rddm_timer);
		cnss_pci_collect_dump_info(pci_priv, false);

		if (!plat_priv->recovery_enabled)
			CNSS_ASSERT(0);
	}

	if (!cnss_is_device_powered_on(plat_priv)) {
		cnss_pr_dbg("Device is already powered off, ignore\n");
		goto skip_power_off;
	}

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		do_force_wake = false;
	cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, do_force_wake);

	/* FBC image will be freed after powering off MHI, so skip
	 * if RAM dump data is still valid.
	 */
	if (plat_priv->ramdump_info_v2.dump_data_valid)
		goto skip_power_off;

	cnss_pci_power_off_mhi(pci_priv);
	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
	cnss_pci_deinit_mhi(pci_priv);
	cnss_power_off_device(plat_priv);

skip_power_off:
	pci_priv->remap_window = 0;

	clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
	clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
	if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state) ||
	    test_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state)) {
		clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
		pci_priv->pci_link_down_ind = false;
	}
	clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	clear_bit(CNSS_DRIVER_IDLE_SHUTDOWN, &plat_priv->driver_state);
	memset(&print_optimize, 0, sizeof(print_optimize));

out:
	return ret;
}

static void cnss_qca6290_crash_shutdown(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	set_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
	cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
		    plat_priv->driver_state);

	cnss_pci_collect_dump_info(pci_priv, true);
	clear_bit(CNSS_IN_PANIC, &plat_priv->driver_state);
}

static int cnss_qca6290_ramdump(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
	struct cnss_dump_data *dump_data = &info_v2->dump_data;
	struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
	int ret = 0;

	if (!info_v2->dump_data_valid || !dump_seg ||
	    dump_data->nentries == 0)
		return 0;

	ret = cnss_do_elf_ramdump(plat_priv);

	cnss_pci_clear_dump_info(pci_priv);
	cnss_pci_power_off_mhi(pci_priv);
	cnss_suspend_pci_link(pci_priv);
	cnss_pci_deinit_mhi(pci_priv);
	cnss_power_off_device(plat_priv);

	return ret;
}

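/*
 * Dispatch a power-up request to the chip-specific handler selected by the
 * enumerated PCIe device ID.
 */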
int cnss_pci_dev_powerup(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		ret = cnss_qca6174_powerup(pci_priv);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		ret = cnss_qca6290_powerup(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown device_id found: 0x%x\n",
			    pci_priv->device_id);
		ret = -ENODEV;
	}

	return ret;
}

int cnss_pci_dev_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		ret = cnss_qca6174_shutdown(pci_priv);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		ret = cnss_qca6290_shutdown(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown device_id found: 0x%x\n",
			    pci_priv->device_id);
		ret = -ENODEV;
	}

	return ret;
}

int cnss_pci_dev_crash_shutdown(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		cnss_qca6174_crash_shutdown(pci_priv);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		cnss_qca6290_crash_shutdown(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown device_id found: 0x%x\n",
			    pci_priv->device_id);
		ret = -ENODEV;
	}

	return ret;
}

int cnss_pci_dev_ramdump(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv) {
		cnss_pr_err("pci_priv is NULL\n");
		return -ENODEV;
	}

	switch (pci_priv->device_id) {
	case QCA6174_DEVICE_ID:
		ret = cnss_qca6174_ramdump(pci_priv);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		ret = cnss_qca6290_ramdump(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown device_id found: 0x%x\n",
			    pci_priv->device_id);
		ret = -ENODEV;
	}

	return ret;
}

int cnss_pci_is_drv_connected(struct device *dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));

	if (!pci_priv)
		return -ENODEV;

	return pci_priv->drv_connected_last;
}
EXPORT_SYMBOL(cnss_pci_is_drv_connected);

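/*
 * Delayed work that registers the WLAN host driver once cold boot
 * calibration (CBC) completes. If calibration has not started because the
 * file system is not ready, the work reschedules itself; if calibration
 * timed out, a CNSS_CAL_TIMEOUT event is posted before registration.
 */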
static void cnss_wlan_reg_driver_work(struct work_struct *work)
{
	struct cnss_plat_data *plat_priv =
		container_of(work, struct cnss_plat_data, wlan_reg_driver_work.work);
	struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
	struct cnss_cal_info *cal_info;
	unsigned int timeout;

	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state))
		return;

	if (test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state)) {
		goto reg_driver;
	} else {
		if (plat_priv->charger_mode) {
			cnss_pr_err("Ignore calibration timeout in charger mode\n");
			return;
		}
		if (!test_bit(CNSS_IN_COLD_BOOT_CAL,
			      &plat_priv->driver_state)) {
			timeout = cnss_get_timeout(plat_priv,
						   CNSS_TIMEOUT_CALIBRATION);
			cnss_pr_dbg("File system not ready to start calibration. Wait for %ds..\n",
				    timeout / 1000);
			schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
					      msecs_to_jiffies(timeout));
			return;
		}

		del_timer(&plat_priv->fw_boot_timer);
		if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state) &&
		    !test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
			cnss_pr_err("Timeout waiting for calibration to complete\n");
			CNSS_ASSERT(0);
		}
		cal_info = kzalloc(sizeof(*cal_info), GFP_KERNEL);
		if (!cal_info)
			return;
		cal_info->cal_status = CNSS_CAL_TIMEOUT;
		cnss_driver_event_post(plat_priv,
				       CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
				       0, cal_info);
	}
reg_driver:
	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
		return;
	}
	reinit_completion(&plat_priv->power_up_complete);
	cnss_driver_event_post(plat_priv,
			       CNSS_DRIVER_EVENT_REGISTER_DRIVER,
			       CNSS_EVENT_SYNC_UNKILLABLE,
			       pci_priv->driver_ops);
}

int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv;
	struct cnss_pci_data *pci_priv;
	const struct pci_device_id *id_table = driver_ops->id_table;
	unsigned int timeout;

	if (!cnss_check_driver_loading_allowed()) {
		cnss_pr_info("No cnss2 dtsi entry present\n");
		return -ENODEV;
	}

	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);

	if (!plat_priv) {
		cnss_pr_buf("plat_priv is not ready for register driver\n");
		return -EAGAIN;
	}

	pci_priv = plat_priv->bus_priv;
	if (test_bit(CNSS_WLAN_HW_DISABLED, &plat_priv->driver_state)) {
		while (id_table && id_table->device) {
			if (plat_priv->device_id == id_table->device) {
				if (plat_priv->device_id == KIWI_DEVICE_ID &&
				    driver_ops->chip_version != 2) {
					cnss_pr_err("WLAN HW disabled. Only kiwi_v2 is supported\n");
					return -ENODEV;
				}
				cnss_pr_info("WLAN register driver deferred for device ID: 0x%x due to HW disable\n",
					     id_table->device);
				plat_priv->driver_ops = driver_ops;
				return 0;
			}
			id_table++;
		}
		return -ENODEV;
	}

	if (!test_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state)) {
		cnss_pr_info("PCI probe not yet done for register driver\n");
		return -EAGAIN;
	}

	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state)) {
		cnss_pr_err("Driver has already registered\n");
		return -EEXIST;
	}

	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_dbg("Reboot/Shutdown is in progress, ignore register driver\n");
		return -EINVAL;
	}

	if (!id_table || !pci_dev_present(id_table)) {
		/* The id_table pointer is advanced by pci_dev_present(),
		 * so check again using a local pointer.
		 */
		id_table = driver_ops->id_table;
		while (id_table && id_table->vendor) {
			cnss_pr_info("Host driver is built for PCIe device ID 0x%x\n",
				     id_table->device);
			id_table++;
		}
		cnss_pr_err("Enumerated PCIe device id is 0x%x, reject unsupported driver\n",
			    pci_priv->device_id);
		return -ENODEV;
	}

	if (driver_ops->chip_version != CNSS_CHIP_VER_ANY &&
	    driver_ops->chip_version != plat_priv->device_version.major_version) {
		cnss_pr_err("Driver built for chip ver 0x%x, enumerated ver 0x%x, reject unsupported driver\n",
			    driver_ops->chip_version,
			    plat_priv->device_version.major_version);
		return -ENODEV;
	}
	set_bit(CNSS_DRIVER_REGISTER, &plat_priv->driver_state);

	if (!plat_priv->cbc_enabled ||
	    test_bit(CNSS_COLD_BOOT_CAL_DONE, &plat_priv->driver_state))
		goto register_driver;

	pci_priv->driver_ops = driver_ops;
	/* If cold boot calibration (CBC) is enabled, it is the first step in
	 * the init sequence and is triggered once the file system is ready.
	 * Qcacld is loaded from vendor_modprobe.sh at early boot, so its
	 * registration must be deferred until CBC completes.
	 */
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_CALIBRATION);
	INIT_DELAYED_WORK(&plat_priv->wlan_reg_driver_work,
			  cnss_wlan_reg_driver_work);
	schedule_delayed_work(&plat_priv->wlan_reg_driver_work,
			      msecs_to_jiffies(timeout));
	cnss_pr_info("WLAN register driver deferred for calibration\n");
	return 0;
register_driver:
	reinit_completion(&plat_priv->power_up_complete);
	ret = cnss_driver_event_post(plat_priv,
				     CNSS_DRIVER_EVENT_REGISTER_DRIVER,
				     CNSS_EVENT_SYNC_UNKILLABLE,
				     driver_ops);

	return ret;
}
EXPORT_SYMBOL(cnss_wlan_register_driver);

void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
{
	struct cnss_plat_data *plat_priv;
	int ret = 0;
	unsigned int timeout;

	plat_priv = cnss_get_plat_priv_by_driver_ops(driver_ops);
	if (!plat_priv) {
		cnss_pr_err("plat_priv is NULL\n");
		return;
	}

	mutex_lock(&plat_priv->driver_ops_lock);

	if (plat_priv->device_id == QCA6174_DEVICE_ID)
		goto skip_wait_power_up;

	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_WLAN_WATCHDOG);
	ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
					  msecs_to_jiffies(timeout));
	if (!ret) {
		cnss_pr_err("Timeout (%ums) waiting for driver power up to complete\n",
			    timeout);
		CNSS_ASSERT(0);
	}

skip_wait_power_up:
	if (!test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) &&
	    !test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		goto skip_wait_recovery;

	reinit_completion(&plat_priv->recovery_complete);
	timeout = cnss_get_timeout(plat_priv, CNSS_TIMEOUT_RECOVERY);
	ret = wait_for_completion_timeout(&plat_priv->recovery_complete,
					  msecs_to_jiffies(timeout));
	if (!ret) {
		cnss_pr_err("Timeout (%ums) waiting for recovery to complete\n",
			    timeout);
		CNSS_ASSERT(0);
	}

skip_wait_recovery:
	cnss_driver_event_post(plat_priv,
			       CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
			       CNSS_EVENT_SYNC_UNKILLABLE, NULL);

	mutex_unlock(&plat_priv->driver_ops_lock);
}
EXPORT_SYMBOL(cnss_wlan_unregister_driver);

int cnss_pci_register_driver_hdlr(struct cnss_pci_data *pci_priv,
				  void *data)
{
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(CNSS_IN_REBOOT, &plat_priv->driver_state)) {
		cnss_pr_dbg("Reboot or shutdown is in progress, ignore register driver\n");
		return -EINVAL;
	}

	set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
	pci_priv->driver_ops = data;

	ret = cnss_pci_dev_powerup(pci_priv);
	if (ret) {
		clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
		pci_priv->driver_ops = NULL;
	} else {
		set_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);
	}

	return ret;
}

int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
	cnss_pci_dev_shutdown(pci_priv);
	pci_priv->driver_ops = NULL;
	clear_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state);

	return 0;
}

static int cnss_pci_suspend_driver(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	pm_message_t state = { .event = PM_EVENT_SUSPEND };

	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
	    driver_ops && driver_ops->suspend) {
		ret = driver_ops->suspend(pci_dev, state);
		if (ret) {
			cnss_pr_err("Failed to suspend host driver, err = %d\n",
				    ret);
			ret = -EAGAIN;
		}
	}

	return ret;
}

static int cnss_pci_resume_driver(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	struct cnss_wlan_driver *driver_ops = pci_priv->driver_ops;
	int ret = 0;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
	    driver_ops && driver_ops->resume) {
		ret = driver_ops->resume(pci_dev);
		if (ret)
			cnss_pr_err("Failed to resume host driver, err = %d\n",
				    ret);
	}

	return ret;
}

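/*
 * Suspend the bus: move MHI to the suspend state and, unless a DRV
 * connection was last reported (drv_connected_last), save PCI config space,
 * disable the PCI device and enter D3hot before turning off the PCI link.
 * If turning off the link fails, the PCI and MHI state is rolled back.
 */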
int cnss_pci_suspend_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int ret = 0;

	if (pci_priv->pci_link_state == PCI_LINK_DOWN)
		goto out;

	if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
		ret = -EAGAIN;
		goto out;
	}

	if (pci_priv->drv_connected_last)
		goto skip_disable_pci;

	pci_clear_master(pci_dev);
	cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
	pci_disable_device(pci_dev);

	ret = pci_set_power_state(pci_dev, PCI_D3hot);
	if (ret)
		cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);

skip_disable_pci:
	if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
		ret = -EAGAIN;
		goto resume_mhi;
	}
	pci_priv->pci_link_state = PCI_LINK_DOWN;

	return 0;

resume_mhi:
	if (!pci_is_enabled(pci_dev))
		if (pci_enable_device(pci_dev))
			cnss_pr_err("Failed to enable PCI device\n");
	if (pci_priv->saved_state)
		cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
	pci_set_master(pci_dev);
	cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
out:
	return ret;
}

int cnss_pci_resume_bus(struct cnss_pci_data *pci_priv)
{
	struct pci_dev *pci_dev = pci_priv->pci_dev;
	int ret = 0;

	if (pci_priv->pci_link_state == PCI_LINK_UP)
		goto out;

	if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
		cnss_fatal_err("Failed to resume PCI link from suspend\n");
		cnss_pci_link_down(&pci_dev->dev);
		ret = -EAGAIN;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;

	if (pci_priv->drv_connected_last)
		goto skip_enable_pci;

	ret = pci_enable_device(pci_dev);
	if (ret) {
		cnss_pr_err("Failed to enable PCI device, err = %d\n",
			    ret);
		goto out;
	}

	if (pci_priv->saved_state)
		cnss_set_pci_config_space(pci_priv,
					  RESTORE_PCI_CONFIG_SPACE);
	pci_set_master(pci_dev);

skip_enable_pci:
	if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME))
		ret = -EAGAIN;
out:
	return ret;
}

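/*
 * System PM suspend callback. Handles the special case where only PCIe
 * enumeration has completed (MHI still in INIT), rejects suspend with
 * -EAGAIN when firmware does not support non-DRV suspend, and otherwise
 * suspends the host driver and, if power collapse is enabled, the bus.
 */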
static int cnss_pci_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		goto out;

	if (!cnss_is_device_powered_on(plat_priv))
		goto out;

	/* No MHI state bit is set if only PCIe enumeration has finished,
	 * so test_bit() is not applicable to check for the INIT state.
	 */
	if (pci_priv->mhi_state == CNSS_MHI_INIT) {
		bool suspend = cnss_should_suspend_pwroff(pci_dev);

		/* Suspend the PCI link and power off in the LPM case
		 * if the chipset didn't do that after PCIe enumeration.
		 */
		if (!suspend) {
			ret = cnss_suspend_pci_link(pci_priv);
			if (ret)
				cnss_pr_err("Failed to suspend PCI link, err = %d\n",
					    ret);
			cnss_power_off_device(plat_priv);
			goto out;
		}
	}

	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
	    pci_priv->drv_supported) {
		pci_priv->drv_connected_last =
			cnss_pci_get_drv_connected(pci_priv);
		if (!pci_priv->drv_connected_last) {
			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
			ret = -EAGAIN;
			goto out;
		}
	}

	set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);

	ret = cnss_pci_suspend_driver(pci_priv);
	if (ret)
		goto clear_flag;

	if (!pci_priv->disable_pc) {
		mutex_lock(&pci_priv->bus_lock);
		ret = cnss_pci_suspend_bus(pci_priv);
		mutex_unlock(&pci_priv->bus_lock);
		if (ret)
			goto resume_driver;
	}

	cnss_pci_set_monitor_wake_intr(pci_priv, false);

	return 0;

resume_driver:
	cnss_pci_resume_driver(pci_priv);
clear_flag:
	pci_priv->drv_connected_last = 0;
	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
out:
	return ret;
}

static int cnss_pci_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		goto out;

	if (pci_priv->pci_link_down_ind)
		goto out;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		goto out;

	if (!pci_priv->disable_pc) {
		mutex_lock(&pci_priv->bus_lock);
		ret = cnss_pci_resume_bus(pci_priv);
		mutex_unlock(&pci_priv->bus_lock);
		if (ret)
			goto out;
	}

	ret = cnss_pci_resume_driver(pci_priv);

	pci_priv->drv_connected_last = 0;
	clear_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);

out:
	return ret;
}

static int cnss_pci_suspend_noirq(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_wlan_driver *driver_ops;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		goto out;

	driver_ops = pci_priv->driver_ops;
	plat_priv = pci_priv->plat_priv;
	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
	    driver_ops && driver_ops->suspend_noirq)
		ret = driver_ops->suspend_noirq(pci_dev);

	if (pci_priv->disable_pc && !pci_dev->state_saved &&
	    !pci_priv->plat_priv->use_pm_domain)
		pci_save_state(pci_dev);

out:
	return ret;
}

static int cnss_pci_resume_noirq(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_wlan_driver *driver_ops;
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		goto out;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		goto out;

	plat_priv = pci_priv->plat_priv;
	driver_ops = pci_priv->driver_ops;
	if (test_bit(CNSS_DRIVER_REGISTERED, &plat_priv->driver_state) &&
	    driver_ops && driver_ops->resume_noirq &&
	    !pci_priv->pci_link_down_ind)
		ret = driver_ops->resume_noirq(pci_dev);

out:
	return ret;
}

static int cnss_pci_runtime_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -EAGAIN;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -EAGAIN;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		return -EAGAIN;

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress!\n");
		return -EAGAIN;
	}

	if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks) &&
	    pci_priv->drv_supported) {
		pci_priv->drv_connected_last =
			cnss_pci_get_drv_connected(pci_priv);
		if (!pci_priv->drv_connected_last) {
			cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
			return -EAGAIN;
		}
	}

	cnss_pr_vdbg("Runtime suspend start\n");

	driver_ops = pci_priv->driver_ops;
	if (driver_ops && driver_ops->runtime_ops &&
	    driver_ops->runtime_ops->runtime_suspend)
		ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
	else
		ret = cnss_auto_suspend(dev);

	if (ret)
		pci_priv->drv_connected_last = 0;

	cnss_pr_vdbg("Runtime suspend status: %d\n", ret);

	return ret;
}

static int cnss_pci_runtime_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_wlan_driver *driver_ops;

	if (!pci_priv)
		return -EAGAIN;

	if (!cnss_is_device_powered_on(pci_priv->plat_priv))
		return -EAGAIN;

	if (pci_priv->pci_link_down_ind) {
		cnss_pr_dbg("PCI link down recovery is in progress!\n");
		return -EAGAIN;
	}

	cnss_pr_vdbg("Runtime resume start\n");

	driver_ops = pci_priv->driver_ops;
	if (driver_ops && driver_ops->runtime_ops &&
	    driver_ops->runtime_ops->runtime_resume)
		ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
	else
		ret = cnss_auto_resume(dev);

	cnss_pr_vdbg("Runtime resume status: %d\n", ret);

	return ret;
}

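/*
 * Runtime PM idle callback: queue an autosuspend request and return -EBUSY
 * so the PM core does not suspend the device directly from the idle path.
 */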
static int cnss_pci_runtime_idle(struct device *dev)
{
	cnss_pr_vdbg("Runtime idle\n");

	pm_request_autosuspend(dev);

	return -EBUSY;
}

int cnss_wlan_pm_control(struct device *dev, bool vote)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	ret = cnss_pci_disable_pc(pci_priv, vote);
	if (ret)
		return ret;

	pci_priv->disable_pc = vote;
	cnss_pr_dbg("%s PCIe power collapse\n", vote ? "disable" : "enable");

	return 0;
}
EXPORT_SYMBOL(cnss_wlan_pm_control);

static void cnss_pci_pm_runtime_get_record(struct cnss_pci_data *pci_priv,
					   enum cnss_rtpm_id id)
{
	if (id >= RTPM_ID_MAX)
		return;

	atomic_inc(&pci_priv->pm_stats.runtime_get);
	atomic_inc(&pci_priv->pm_stats.runtime_get_id[id]);
	pci_priv->pm_stats.runtime_get_timestamp_id[id] =
		cnss_get_host_timestamp(pci_priv->plat_priv);
}

static void cnss_pci_pm_runtime_put_record(struct cnss_pci_data *pci_priv,
					   enum cnss_rtpm_id id)
{
	if (id >= RTPM_ID_MAX)
		return;

	atomic_inc(&pci_priv->pm_stats.runtime_put);
	atomic_inc(&pci_priv->pm_stats.runtime_put_id[id]);
	pci_priv->pm_stats.runtime_put_timestamp_id[id] =
		cnss_get_host_timestamp(pci_priv->plat_priv);
}

void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
{
	struct device *dev;

	if (!pci_priv)
		return;

	dev = &pci_priv->pci_dev->dev;

	cnss_pr_dbg("Runtime PM usage count: %d\n",
		    atomic_read(&dev->power.usage_count));
}

int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
{
	struct device *dev;
	enum rpm_status status;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	status = dev->power.runtime_status;
	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
			     (void *)_RET_IP_);

	return pm_request_resume(dev);
}

int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
{
	struct device *dev;
	enum rpm_status status;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	status = dev->power.runtime_status;
	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
			     (void *)_RET_IP_);

	return pm_runtime_resume(dev);
}

int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv,
			    enum cnss_rtpm_id id)
{
	struct device *dev;
	enum rpm_status status;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	status = dev->power.runtime_status;
	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
			     (void *)_RET_IP_);

	cnss_pci_pm_runtime_get_record(pci_priv, id);

	return pm_runtime_get(dev);
}

int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv,
				 enum cnss_rtpm_id id)
{
	struct device *dev;
	enum rpm_status status;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	status = dev->power.runtime_status;
	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
		cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
			     (void *)_RET_IP_);

	cnss_pci_pm_runtime_get_record(pci_priv, id);

	return pm_runtime_get_sync(dev);
}

void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv,
				      enum cnss_rtpm_id id)
{
	if (!pci_priv)
		return;

	cnss_pci_pm_runtime_get_record(pci_priv, id);
	pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
}

int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv,
					enum cnss_rtpm_id id)
{
	struct device *dev;

	if (!pci_priv)
		return -ENODEV;

	dev = &pci_priv->pci_dev->dev;

	if (atomic_read(&dev->power.usage_count) == 0) {
		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
		return -EINVAL;
	}

	cnss_pci_pm_runtime_put_record(pci_priv, id);

	return pm_runtime_put_autosuspend(&pci_priv->pci_dev->dev);
}

void cnss_pci_pm_runtime_put_noidle(struct cnss_pci_data *pci_priv,
				    enum cnss_rtpm_id id)
{
	struct device *dev;

	if (!pci_priv)
		return;

	dev = &pci_priv->pci_dev->dev;

	if (atomic_read(&dev->power.usage_count) == 0) {
		cnss_pr_dbg("Ignore excessive runtime PM put operation\n");
		return;
	}

	cnss_pci_pm_runtime_put_record(pci_priv, id);
	pm_runtime_put_noidle(&pci_priv->pci_dev->dev);
}

void cnss_pci_pm_runtime_mark_last_busy(struct cnss_pci_data *pci_priv)
{
	if (!pci_priv)
		return;

	pm_runtime_mark_last_busy(&pci_priv->pci_dev->dev);
}

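/*
 * Host driver entry point for auto (runtime) suspend. The bus is suspended
 * only when no QMI transaction is in flight; the bandwidth vote is dropped
 * to NONE without updating current_bw_vote so that resume can restore the
 * last vote.
 */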
int cnss_auto_suspend(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	if (!pci_priv->qmi_send_usage_count) {
		ret = cnss_pci_suspend_bus(pci_priv);
		if (ret) {
			mutex_unlock(&pci_priv->bus_lock);
			return ret;
		}
	}

	cnss_pci_set_auto_suspended(pci_priv, 1);
	mutex_unlock(&pci_priv->bus_lock);

	cnss_pci_set_monitor_wake_intr(pci_priv, true);

	/* For suspend, temporarily set the bandwidth vote to NONE and don't
	 * save it in current_bw_vote, since the resume path should vote for
	 * the last used bandwidth. Also ignore errors if bandwidth voting is
	 * not set up.
	 */
	cnss_setup_bus_bandwidth(plat_priv, CNSS_BUS_WIDTH_NONE, false);
	return 0;
}
EXPORT_SYMBOL(cnss_auto_suspend);

int cnss_auto_resume(struct device *dev)
{
	int ret = 0;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;

	if (!pci_priv)
		return -ENODEV;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	ret = cnss_pci_resume_bus(pci_priv);
	if (ret) {
		mutex_unlock(&pci_priv->bus_lock);
		return ret;
	}

	cnss_pci_set_auto_suspended(pci_priv, 0);
	mutex_unlock(&pci_priv->bus_lock);

	cnss_request_bus_bandwidth(dev, plat_priv->icc.current_bw_vote);
	pci_priv->drv_connected_last = 0;

	return 0;
}
EXPORT_SYMBOL(cnss_auto_resume);

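/*
 * Synchronously request force wake. A non-zero timeout_us busy-waits up to
 * that limit; zero sleeps for up to the MHI controller timeout. Device
 * families without force wake support return 0 immediately.
 */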
int cnss_pci_force_wake_request_sync(struct device *dev, int timeout_us)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	if (timeout_us) {
		/* Busy wait for timeout_us */
		return cnss_mhi_device_get_sync_atomic(pci_priv,
						       timeout_us, false);
	} else {
		/* Sleep wait for mhi_ctrl->timeout_ms */
		return mhi_device_get_sync(mhi_ctrl->mhi_dev);
	}
}
EXPORT_SYMBOL(cnss_pci_force_wake_request_sync);

int cnss_pci_force_wake_request(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	mhi_device_get(mhi_ctrl->mhi_dev);

	return 0;
}
EXPORT_SYMBOL(cnss_pci_force_wake_request);

int cnss_pci_is_device_awake(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	return (mhi_ctrl->dev_state == MHI_STATE_M0);
}
EXPORT_SYMBOL(cnss_pci_is_device_awake);

int cnss_pci_force_wake_release(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv;
	struct mhi_controller *mhi_ctrl;

	if (!pci_priv)
		return -ENODEV;

	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		break;
	default:
		return 0;
	}

	mhi_ctrl = pci_priv->mhi_ctrl;
	if (!mhi_ctrl)
		return -EINVAL;

	plat_priv = pci_priv->plat_priv;
	if (!plat_priv)
		return -ENODEV;

	if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state))
		return -EAGAIN;

	mhi_device_put(mhi_ctrl->mhi_dev);

	return 0;
}
EXPORT_SYMBOL(cnss_pci_force_wake_release);

int cnss_pci_qmi_send_get(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	if (cnss_pci_get_auto_suspended(pci_priv) &&
	    !pci_priv->qmi_send_usage_count)
		ret = cnss_pci_resume_bus(pci_priv);
	pci_priv->qmi_send_usage_count++;
	cnss_pr_buf("Increased QMI send usage count to %d\n",
		    pci_priv->qmi_send_usage_count);
	mutex_unlock(&pci_priv->bus_lock);

	return ret;
}

int cnss_pci_qmi_send_put(struct cnss_pci_data *pci_priv)
{
	int ret = 0;

	if (!pci_priv)
		return -ENODEV;

	mutex_lock(&pci_priv->bus_lock);
	if (pci_priv->qmi_send_usage_count)
		pci_priv->qmi_send_usage_count--;
	cnss_pr_buf("Decreased QMI send usage count to %d\n",
		    pci_priv->qmi_send_usage_count);
	if (cnss_pci_get_auto_suspended(pci_priv) &&
	    !pci_priv->qmi_send_usage_count &&
	    !cnss_pcie_is_device_down(pci_priv))
		ret = cnss_pci_suspend_bus(pci_priv);
	mutex_unlock(&pci_priv->bus_lock);

	return ret;
}

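/*
 * Copy an AFC database blob into the firmware-visible AFC memory slot
 * selected by slotid, zero-pad the rest of the slot, and set the slot's
 * auth status word to success.
 */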
int cnss_send_buffer_to_afcmem(struct device *dev, const uint8_t *afcdb,
			       uint32_t len, uint8_t slotid)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
	struct cnss_fw_mem *fw_mem;
	void *mem = NULL;
	int i, ret;
	u32 *status;

	if (!plat_priv)
		return -EINVAL;

	fw_mem = plat_priv->fw_mem;
	if (slotid >= AFC_MAX_SLOT) {
		cnss_pr_err("Invalid slot id %d\n", slotid);
		ret = -EINVAL;
		goto err;
	}
	if (len > AFC_SLOT_SIZE) {
		cnss_pr_err("len %d greater than slot size\n", len);
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
			mem = fw_mem[i].va;
			status = mem + (slotid * AFC_SLOT_SIZE);
			break;
		}
	}

	if (!mem) {
		cnss_pr_err("AFC mem is not available\n");
		ret = -ENOMEM;
		goto err;
	}

	memcpy(mem + (slotid * AFC_SLOT_SIZE), afcdb, len);
	if (len < AFC_SLOT_SIZE)
		memset(mem + (slotid * AFC_SLOT_SIZE) + len,
		       0, AFC_SLOT_SIZE - len);
	status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);

	return 0;
err:
	return ret;
}
EXPORT_SYMBOL(cnss_send_buffer_to_afcmem);

int cnss_reset_afcmem(struct device *dev, uint8_t slotid)
{
	struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
	struct cnss_fw_mem *fw_mem;
	void *mem = NULL;
	int i, ret;

	if (!plat_priv)
		return -EINVAL;

	fw_mem = plat_priv->fw_mem;
	if (slotid >= AFC_MAX_SLOT) {
		cnss_pr_err("Invalid slot id %d\n", slotid);
		ret = -EINVAL;
		goto err;
	}

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].type == QMI_WLFW_AFC_MEM_V01) {
			mem = fw_mem[i].va;
			break;
		}
	}

	if (!mem) {
		cnss_pr_err("AFC mem is not available\n");
		ret = -ENOMEM;
		goto err;
	}

	memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
	return 0;

err:
	return ret;
}
EXPORT_SYMBOL(cnss_reset_afcmem);

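/*
 * Allocate DMA memory for each firmware memory segment requested over QMI.
 * If a forced-contiguous allocation fails, retry without
 * DMA_ATTR_FORCE_CONTIGUOUS before asserting.
 */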
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	struct device *dev = &pci_priv->pci_dev->dev;
	int i;

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (!fw_mem[i].va && fw_mem[i].size) {
retry:
			fw_mem[i].va =
				dma_alloc_attrs(dev, fw_mem[i].size,
						&fw_mem[i].pa, GFP_KERNEL,
						fw_mem[i].attrs);

			if (!fw_mem[i].va) {
				if ((fw_mem[i].attrs &
				     DMA_ATTR_FORCE_CONTIGUOUS)) {
					fw_mem[i].attrs &=
						~DMA_ATTR_FORCE_CONTIGUOUS;

					cnss_pr_dbg("Fallback to non-contiguous memory for FW, Mem type: %u\n",
						    fw_mem[i].type);
					goto retry;
				}
				cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n",
					    fw_mem[i].size, fw_mem[i].type);
				CNSS_ASSERT(0);
				return -ENOMEM;
			}
		}
	}

	return 0;
}

static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
	struct device *dev = &pci_priv->pci_dev->dev;
	int i;

	for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
		if (fw_mem[i].va && fw_mem[i].size) {
			cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
				    fw_mem[i].va, &fw_mem[i].pa,
				    fw_mem[i].size, fw_mem[i].type);
			dma_free_attrs(dev, fw_mem[i].size,
				       fw_mem[i].va, fw_mem[i].pa,
				       fw_mem[i].attrs);
			fw_mem[i].va = NULL;
			fw_mem[i].pa = 0;
			fw_mem[i].size = 0;
			fw_mem[i].type = 0;
		}
	}

	plat_priv->fw_mem_seg_len = 0;
}

int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
	int i, j;

	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
		if (!qdss_mem[i].va && qdss_mem[i].size) {
			qdss_mem[i].va =
				dma_alloc_coherent(&pci_priv->pci_dev->dev,
						   qdss_mem[i].size,
						   &qdss_mem[i].pa,
						   GFP_KERNEL);
			if (!qdss_mem[i].va) {
				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chunk-ID: %d\n",
					    qdss_mem[i].size,
					    qdss_mem[i].type, i);
				break;
			}
		}
	}

	/* Best-effort allocation for QDSS trace */
	if (i < plat_priv->qdss_mem_seg_len) {
		for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
			qdss_mem[j].type = 0;
			qdss_mem[j].size = 0;
		}
		plat_priv->qdss_mem_seg_len = i;
	}

	return 0;
}

void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
	int i;

	for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
		if (qdss_mem[i].va && qdss_mem[i].size) {
			cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
				    &qdss_mem[i].pa, qdss_mem[i].size,
				    qdss_mem[i].type);
			dma_free_coherent(&pci_priv->pci_dev->dev,
					  qdss_mem[i].size, qdss_mem[i].va,
					  qdss_mem[i].pa);
			qdss_mem[i].va = NULL;
			qdss_mem[i].pa = 0;
			qdss_mem[i].size = 0;
			qdss_mem[i].type = 0;
		}
	}
	plat_priv->qdss_mem_seg_len = 0;
}

int cnss_pci_load_tme_patch(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;
	char filename[MAX_FIRMWARE_NAME_LEN];
	char *tme_patch_filename = NULL;
	const struct firmware *fw_entry;
	int ret = 0;

	switch (pci_priv->device_id) {
	case PEACH_DEVICE_ID:
		if (plat_priv->device_version.major_version == FW_V1_NUMBER)
			tme_patch_filename = TME_PATCH_FILE_NAME_1_0;
		else if (plat_priv->device_version.major_version == FW_V2_NUMBER)
			tme_patch_filename = TME_PATCH_FILE_NAME_2_0;
		break;
	case QCA6174_DEVICE_ID:
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	default:
		cnss_pr_dbg("TME-L not supported for device ID: (0x%x)\n",
			    pci_priv->device_id);
		return 0;
	}

	if (!tme_lite_mem->va && !tme_lite_mem->size) {
		scnprintf(filename, MAX_FIRMWARE_NAME_LEN, "%s",
			  tme_patch_filename);

		ret = firmware_request_nowarn(&fw_entry, filename,
					      &pci_priv->pci_dev->dev);
		if (ret) {
			cnss_pr_err("Failed to load TME-L patch: %s, ret: %d\n",
				    filename, ret);
			return ret;
		}

		tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
						      fw_entry->size,
						      &tme_lite_mem->pa,
						      GFP_KERNEL);
		if (!tme_lite_mem->va) {
			cnss_pr_err("Failed to allocate memory for TME-L patch, size: 0x%zx\n",
				    fw_entry->size);
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size);
		tme_lite_mem->size = fw_entry->size;
		release_firmware(fw_entry);
	}

	return 0;
}

static void cnss_pci_free_tme_lite_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *tme_lite_mem = &plat_priv->tme_lite_mem;

	if (tme_lite_mem->va && tme_lite_mem->size) {
		cnss_pr_dbg("Freeing memory for TME patch, va: 0x%pK, pa: %pa, size: 0x%zx\n",
			    tme_lite_mem->va, &tme_lite_mem->pa,
			    tme_lite_mem->size);
		dma_free_coherent(&pci_priv->pci_dev->dev, tme_lite_mem->size,
				  tme_lite_mem->va, tme_lite_mem->pa);
	}

	tme_lite_mem->va = NULL;
	tme_lite_mem->pa = 0;
	tme_lite_mem->size = 0;
}

int cnss_pci_load_tme_opt_file(struct cnss_pci_data *pci_priv,
			       enum wlfw_tme_lite_file_type_v01 file)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *tme_lite_mem = NULL;
	char filename[MAX_FIRMWARE_NAME_LEN];
	char *tme_opt_filename = NULL;
	const struct firmware *fw_entry;
	int ret = 0;

	switch (pci_priv->device_id) {
	case PEACH_DEVICE_ID:
		if (file == WLFW_TME_LITE_OEM_FUSE_FILE_V01) {
			tme_opt_filename = TME_OEM_FUSE_FILE_NAME;
			tme_lite_mem = &plat_priv->tme_opt_file_mem[0];
		} else if (file == WLFW_TME_LITE_RPR_FILE_V01) {
			tme_opt_filename = TME_RPR_FILE_NAME;
			tme_lite_mem = &plat_priv->tme_opt_file_mem[1];
		} else if (file == WLFW_TME_LITE_DPR_FILE_V01) {
			tme_opt_filename = TME_DPR_FILE_NAME;
			tme_lite_mem = &plat_priv->tme_opt_file_mem[2];
		}
		break;
	case QCA6174_DEVICE_ID:
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	default:
		cnss_pr_dbg("TME-L opt file: %s not supported for device ID: (0x%x)\n",
			    tme_opt_filename, pci_priv->device_id);
		return 0;
	}

	if (!tme_lite_mem)
		return 0;

	if (!tme_lite_mem->va && !tme_lite_mem->size) {
		cnss_pci_add_fw_prefix_name(pci_priv, filename,
					    tme_opt_filename);

		ret = firmware_request_nowarn(&fw_entry, filename,
					      &pci_priv->pci_dev->dev);
		if (ret) {
			cnss_pr_err("Failed to load TME-L opt file: %s, ret: %d\n",
				    filename, ret);
			return ret;
		}

		tme_lite_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
						      fw_entry->size,
						      &tme_lite_mem->pa,
						      GFP_KERNEL);
		if (!tme_lite_mem->va) {
			cnss_pr_err("Failed to allocate memory for TME-L opt file %s, size: 0x%zx\n",
				    filename, fw_entry->size);
			release_firmware(fw_entry);
			return -ENOMEM;
		}

		memcpy(tme_lite_mem->va, fw_entry->data, fw_entry->size);
		tme_lite_mem->size = fw_entry->size;
		release_firmware(fw_entry);
	}

	return 0;
}

static void cnss_pci_free_tme_opt_file_mem(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct cnss_fw_mem *tme_opt_file_mem = plat_priv->tme_opt_file_mem;
	int i = 0;

	for (i = 0; i < QMI_WLFW_MAX_TME_OPT_FILE_NUM; i++) {
		if (tme_opt_file_mem[i].va && tme_opt_file_mem[i].size) {
			cnss_pr_dbg("Free memory for TME opt file, va: 0x%pK, pa: %pa, size: 0x%zx\n",
				    tme_opt_file_mem[i].va,
				    &tme_opt_file_mem[i].pa,
				    tme_opt_file_mem[i].size);
			dma_free_coherent(&pci_priv->pci_dev->dev,
					  tme_opt_file_mem[i].size,
					  tme_opt_file_mem[i].va,
					  tme_opt_file_mem[i].pa);
		}
		tme_opt_file_mem[i].va = NULL;
		tme_opt_file_mem[i].pa = 0;
		tme_opt_file_mem[i].size = 0;
	}
}

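/*
 * Load the M3/PHY microcode image into DMA-coherent memory. The file name
 * depends on the device family and firmware major version; recent devices
 * fall through to DEFAULT_PHY_UCODE_FILE_NAME for forward compatibility.
 */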
cnss_pci_load_m3(struct cnss_pci_data * pci_priv)5136 int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
5137 {
5138 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5139 struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
5140 char filename[MAX_FIRMWARE_NAME_LEN];
5141 char *phy_filename = DEFAULT_PHY_UCODE_FILE_NAME;
5142 const struct firmware *fw_entry;
5143 int ret = 0;
5144
5145 /* Use forward compatibility here since for any recent device
5146 * it should use DEFAULT_PHY_UCODE_FILE_NAME.
5147 */
5148 switch (pci_priv->device_id) {
5149 case QCA6174_DEVICE_ID:
5150 cnss_pr_err("Invalid device ID (0x%x) to load phy image\n",
5151 pci_priv->device_id);
5152 return -EINVAL;
5153 case QCA6290_DEVICE_ID:
5154 case QCA6390_DEVICE_ID:
5155 case QCA6490_DEVICE_ID:
5156 phy_filename = DEFAULT_PHY_M3_FILE_NAME;
5157 break;
5158 case KIWI_DEVICE_ID:
5159 case MANGO_DEVICE_ID:
5160 case PEACH_DEVICE_ID:
5161 switch (plat_priv->device_version.major_version) {
5162 case FW_V2_NUMBER:
5163 phy_filename = PHY_UCODE_V2_FILE_NAME;
5164 break;
5165 default:
5166 break;
5167 }
5168 break;
5169 default:
5170 break;
5171 }
5172
5173 if (!m3_mem->va && !m3_mem->size) {
5174 cnss_pci_add_fw_prefix_name(pci_priv, filename,
5175 phy_filename);
5176
5177 ret = firmware_request_nowarn(&fw_entry, filename,
5178 &pci_priv->pci_dev->dev);
5179 if (ret) {
5180 cnss_pr_err("Failed to load M3 image: %s\n", filename);
5181 return ret;
5182 }
5183
5184 m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
5185 fw_entry->size, &m3_mem->pa,
5186 GFP_KERNEL);
5187 if (!m3_mem->va) {
5188 cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
5189 fw_entry->size);
5190 release_firmware(fw_entry);
5191 return -ENOMEM;
5192 }
5193
5194 memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
5195 m3_mem->size = fw_entry->size;
5196 release_firmware(fw_entry);
5197 }
5198
5199 return 0;
5200 }
5201
cnss_pci_free_m3_mem(struct cnss_pci_data * pci_priv)5202 static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
5203 {
5204 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5205 struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
5206
5207 if (m3_mem->va && m3_mem->size) {
5208 cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
5209 m3_mem->va, &m3_mem->pa, m3_mem->size);
5210 dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
5211 m3_mem->va, m3_mem->pa);
5212 }
5213
5214 m3_mem->va = NULL;
5215 m3_mem->pa = 0;
5216 m3_mem->size = 0;
5217 }
5218
5219 #ifdef CONFIG_FREE_M3_BLOB_MEM
cnss_pci_free_blob_mem(struct cnss_pci_data * pci_priv)5220 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
5221 {
5222 cnss_pci_free_m3_mem(pci_priv);
5223 }
5224 #else
cnss_pci_free_blob_mem(struct cnss_pci_data * pci_priv)5225 void cnss_pci_free_blob_mem(struct cnss_pci_data *pci_priv)
5226 {
5227 }
5228 #endif
5229
cnss_pci_load_aux(struct cnss_pci_data * pci_priv)5230 int cnss_pci_load_aux(struct cnss_pci_data *pci_priv)
5231 {
5232 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5233 struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
5234 char filename[MAX_FIRMWARE_NAME_LEN];
5235 char *aux_filename = DEFAULT_AUX_FILE_NAME;
5236 const struct firmware *fw_entry;
5237 int ret = 0;
5238
5239 if (!aux_mem->va && !aux_mem->size) {
5240 cnss_pci_add_fw_prefix_name(pci_priv, filename,
5241 aux_filename);
5242
5243 ret = firmware_request_nowarn(&fw_entry, filename,
5244 &pci_priv->pci_dev->dev);
5245 if (ret) {
5246 cnss_pr_err("Failed to load AUX image: %s\n", filename);
5247 return ret;
5248 }
5249
5250 aux_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
5251 fw_entry->size, &aux_mem->pa,
5252 GFP_KERNEL);
5253 if (!aux_mem->va) {
5254 cnss_pr_err("Failed to allocate memory for AUX, size: 0x%zx\n",
5255 fw_entry->size);
5256 release_firmware(fw_entry);
5257 return -ENOMEM;
5258 }
5259
5260 memcpy(aux_mem->va, fw_entry->data, fw_entry->size);
5261 aux_mem->size = fw_entry->size;
5262 release_firmware(fw_entry);
5263 }
5264
5265 return 0;
5266 }
5267
cnss_pci_free_aux_mem(struct cnss_pci_data * pci_priv)5268 static void cnss_pci_free_aux_mem(struct cnss_pci_data *pci_priv)
5269 {
5270 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5271 struct cnss_fw_mem *aux_mem = &plat_priv->aux_mem;
5272
5273 if (aux_mem->va && aux_mem->size) {
5274 cnss_pr_dbg("Freeing memory for AUX, va: 0x%pK, pa: %pa, size: 0x%zx\n",
5275 aux_mem->va, &aux_mem->pa, aux_mem->size);
5276 dma_free_coherent(&pci_priv->pci_dev->dev, aux_mem->size,
5277 aux_mem->va, aux_mem->pa);
5278 }
5279
5280 aux_mem->va = NULL;
5281 aux_mem->pa = 0;
5282 aux_mem->size = 0;
5283 }
5284
cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data * pci_priv)5285 void cnss_pci_fw_boot_timeout_hdlr(struct cnss_pci_data *pci_priv)
5286 {
5287 struct cnss_plat_data *plat_priv;
5288
5289 if (!pci_priv)
5290 return;
5291
5292 cnss_fatal_err("Timeout waiting for FW ready indication\n");
5293
5294 plat_priv = pci_priv->plat_priv;
5295 if (!plat_priv)
5296 return;
5297
5298 if (test_bit(CNSS_IN_COLD_BOOT_CAL, &plat_priv->driver_state)) {
5299 cnss_pr_dbg("Ignore FW ready timeout for calibration mode\n");
5300 return;
5301 }
5302
5303 cnss_schedule_recovery(&pci_priv->pci_dev->dev,
5304 CNSS_REASON_TIMEOUT);
5305 }
5306
cnss_pci_deinit_smmu(struct cnss_pci_data * pci_priv)5307 static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
5308 {
5309 pci_priv->iommu_domain = NULL;
5310 }
5311
cnss_pci_get_iova(struct cnss_pci_data * pci_priv,u64 * addr,u64 * size)5312 int cnss_pci_get_iova(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5313 {
5314 if (!pci_priv)
5315 return -ENODEV;
5316
5317 if (!pci_priv->smmu_iova_len)
5318 return -EINVAL;
5319
5320 *addr = pci_priv->smmu_iova_start;
5321 *size = pci_priv->smmu_iova_len;
5322
5323 return 0;
5324 }
5325
cnss_pci_get_iova_ipa(struct cnss_pci_data * pci_priv,u64 * addr,u64 * size)5326 int cnss_pci_get_iova_ipa(struct cnss_pci_data *pci_priv, u64 *addr, u64 *size)
5327 {
5328 if (!pci_priv)
5329 return -ENODEV;
5330
5331 if (!pci_priv->smmu_iova_ipa_len)
5332 return -EINVAL;
5333
5334 *addr = pci_priv->smmu_iova_ipa_start;
5335 *size = pci_priv->smmu_iova_ipa_len;
5336
5337 return 0;
5338 }
5339
cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data * pci_priv)5340 bool cnss_pci_is_smmu_s1_enabled(struct cnss_pci_data *pci_priv)
5341 {
5342 if (pci_priv)
5343 return pci_priv->smmu_s1_enable;
5344
5345 return false;
5346 }
cnss_smmu_get_domain(struct device * dev)5347 struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
5348 {
5349 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5350
5351 if (!pci_priv)
5352 return NULL;
5353
5354 return pci_priv->iommu_domain;
5355 }
5356 EXPORT_SYMBOL(cnss_smmu_get_domain);
5357
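/**
 * cnss_smmu_map() - Map a physical range into the IPA IOVA window
 * @dev: device whose cnss PCI data holds the IOMMU domain
 * @paddr: physical address to map
 * @iova_addr: on success, set to the IOVA mapped for @paddr
 * @size: size of the range in bytes
 *
 * Mappings are page aligned and carved sequentially out of the
 * [smmu_iova_ipa_start, smmu_iova_ipa_start + smmu_iova_ipa_len)
 * window. IOMMU_CACHE is added when the root complex subtree is
 * marked "dma-coherent" and the DISABLE_IO_COHERENCY quirk is unset.
 *
 * Illustrative call (caller-side names are hypothetical):
 *
 *	uint32_t iova;
 *
 *	if (!cnss_smmu_map(dev, paddr, &iova, len))
 *		program_device_with_iova(iova);
 *
 * Return: 0 on success, negative errno otherwise.
 */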
5358 int cnss_smmu_map(struct device *dev,
5359 phys_addr_t paddr, uint32_t *iova_addr, size_t size)
5360 {
5361 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5362 struct cnss_plat_data *plat_priv;
5363 unsigned long iova;
5364 size_t len;
5365 int ret = 0;
5366 int flag = IOMMU_READ | IOMMU_WRITE;
5367 struct pci_dev *root_port;
5368 struct device_node *root_of_node;
5369 bool dma_coherent = false;
5370
5371 if (!pci_priv)
5372 return -ENODEV;
5373
5374 if (!iova_addr) {
5375 cnss_pr_err("iova_addr is NULL, paddr %pa, size %zu\n",
5376 &paddr, size);
5377 return -EINVAL;
5378 }
5379
5380 plat_priv = pci_priv->plat_priv;
5381
5382 len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
5383 iova = roundup(pci_priv->smmu_iova_ipa_current, PAGE_SIZE);
5384
5385 if (pci_priv->iommu_geometry &&
5386 iova >= pci_priv->smmu_iova_ipa_start +
5387 pci_priv->smmu_iova_ipa_len) {
5388 cnss_pr_err("No IOVA space to map, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5389 iova,
5390 &pci_priv->smmu_iova_ipa_start,
5391 pci_priv->smmu_iova_ipa_len);
5392 return -ENOMEM;
5393 }
5394
5395 if (!test_bit(DISABLE_IO_COHERENCY,
5396 &plat_priv->ctrl_params.quirks)) {
5397 root_port = pcie_find_root_port(pci_priv->pci_dev);
5398 if (!root_port) {
5399 cnss_pr_err("Root port is null, so dma_coherent is disabled\n");
5400 } else {
5401 root_of_node = root_port->dev.of_node;
5402 if (root_of_node && root_of_node->parent) {
5403 dma_coherent =
5404 of_property_read_bool(root_of_node->parent,
5405 "dma-coherent");
5406 cnss_pr_dbg("dma-coherent is %s\n",
5407 dma_coherent ? "enabled" : "disabled");
5408 if (dma_coherent)
5409 flag |= IOMMU_CACHE;
5410 }
5411 }
5412 }
5413
5414 cnss_pr_dbg("IOMMU map: iova %lx, len %zu\n", iova, len);
5415
5416 ret = cnss_iommu_map(pci_priv->iommu_domain, iova,
5417 rounddown(paddr, PAGE_SIZE), len, flag);
5418 if (ret) {
5419 cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
5420 return ret;
5421 }
5422
5423 pci_priv->smmu_iova_ipa_current = iova + len;
5424 *iova_addr = (uint32_t)(iova + paddr - rounddown(paddr, PAGE_SIZE));
5425 cnss_pr_dbg("IOMMU map: iova_addr %lx\n", *iova_addr);
5426
5427 return 0;
5428 }
5429 EXPORT_SYMBOL(cnss_smmu_map);
5430
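/* Reverse of cnss_smmu_map(): unmap a page-aligned range from the IPA
 * IOVA window and rewind the allocation cursor to the unmapped IOVA.
 * The simple cursor rewind assumes mappings are released in reverse
 * order of creation.
 */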
5431 int cnss_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
5432 {
5433 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5434 unsigned long iova;
5435 size_t unmapped;
5436 size_t len;
5437
5438 if (!pci_priv)
5439 return -ENODEV;
5440
5441 iova = rounddown(iova_addr, PAGE_SIZE);
5442 len = roundup(size + iova_addr - iova, PAGE_SIZE);
5443
5444 if (iova >= pci_priv->smmu_iova_ipa_start +
5445 pci_priv->smmu_iova_ipa_len) {
5446 cnss_pr_err("Out of IOVA space to unmap, iova %lx, smmu_iova_ipa_start %pad, smmu_iova_ipa_len %zu\n",
5447 iova,
5448 &pci_priv->smmu_iova_ipa_start,
5449 pci_priv->smmu_iova_ipa_len);
5450 return -ENOMEM;
5451 }
5452
5453 cnss_pr_dbg("IOMMU unmap: iova %lx, len %zu\n", iova, len);
5454
5455 unmapped = iommu_unmap(pci_priv->iommu_domain, iova, len);
5456 if (unmapped != len) {
5457 cnss_pr_err("IOMMU unmap failed, unmapped = %zu, requested = %zu\n",
5458 unmapped, len);
5459 return -EINVAL;
5460 }
5461
5462 pci_priv->smmu_iova_ipa_current = iova;
5463 return 0;
5464 }
5465 EXPORT_SYMBOL(cnss_smmu_unmap);
5466
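/* Fill @info with a snapshot of chip/board/SoC/firmware identifiers
 * cached in the platform data; the BAR virtual and physical addresses
 * come from the PCI core.
 */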
5467 int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
5468 {
5469 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5470 struct cnss_plat_data *plat_priv;
5471
5472 if (!pci_priv)
5473 return -ENODEV;
5474
5475 plat_priv = pci_priv->plat_priv;
5476 if (!plat_priv)
5477 return -ENODEV;
5478
5479 info->va = pci_priv->bar;
5480 info->pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
5481 info->chip_id = plat_priv->chip_info.chip_id;
5482 info->chip_family = plat_priv->chip_info.chip_family;
5483 info->board_id = plat_priv->board_info.board_id;
5484 info->soc_id = plat_priv->soc_info.soc_id;
5485 info->fw_version = plat_priv->fw_version_info.fw_version;
5486 strlcpy(info->fw_build_timestamp,
5487 plat_priv->fw_version_info.fw_build_timestamp,
5488 sizeof(info->fw_build_timestamp));
5489 memcpy(&info->device_version, &plat_priv->device_version,
5490 sizeof(info->device_version));
5491 memcpy(&info->dev_mem_info, &plat_priv->dev_mem_info,
5492 sizeof(info->dev_mem_info));
5493 memcpy(&info->fw_build_id, &plat_priv->fw_build_id,
5494 sizeof(info->fw_build_id));
5495
5496 return 0;
5497 }
5498 EXPORT_SYMBOL(cnss_get_soc_info);
5499
5500 int cnss_pci_get_user_msi_assignment(struct cnss_pci_data *pci_priv,
5501 char *user_name,
5502 int *num_vectors,
5503 u32 *user_base_data,
5504 u32 *base_vector)
5505 {
5506 return cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5507 user_name,
5508 num_vectors,
5509 user_base_data,
5510 base_vector);
5511 }
5512
5513 static int cnss_pci_irq_set_affinity_hint(struct cnss_pci_data *pci_priv,
5514 unsigned int vec,
5515 const struct cpumask *cpumask)
5516 {
5517 int ret;
5518 struct pci_dev *pci_dev = pci_priv->pci_dev;
5519
5520 ret = irq_set_affinity_hint(pci_irq_vector(pci_dev, vec),
5521 cpumask);
5522
5523 return ret;
5524 }
5525
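/* Allocate and configure MSI vectors per the per-chip MSI table; MSI
 * setup is skipped entirely for QCA6174. When the full vector set
 * cannot be allocated, fall back to a single shared vector if the
 * platform supports it.
 */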
5526 static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
5527 {
5528 int ret = 0;
5529 struct pci_dev *pci_dev = pci_priv->pci_dev;
5530 int num_vectors;
5531 struct cnss_msi_config *msi_config;
5532
5533 if (pci_priv->device_id == QCA6174_DEVICE_ID)
5534 return 0;
5535
5536 if (cnss_pci_is_force_one_msi(pci_priv)) {
5537 ret = cnss_pci_get_one_msi_assignment(pci_priv);
5538 cnss_pr_dbg("force one msi\n");
5539 } else {
5540 ret = cnss_pci_get_msi_assignment(pci_priv);
5541 }
5542 if (ret) {
5543 cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
5544 goto out;
5545 }
5546
5547 msi_config = pci_priv->msi_config;
5548 if (!msi_config) {
5549 cnss_pr_err("msi_config is NULL!\n");
5550 ret = -EINVAL;
5551 goto out;
5552 }
5553
5554 num_vectors = pci_alloc_irq_vectors(pci_dev,
5555 msi_config->total_vectors,
5556 msi_config->total_vectors,
5557 PCI_IRQ_MSI | PCI_IRQ_MSIX);
5558 if ((num_vectors != msi_config->total_vectors) &&
5559 !cnss_pci_fallback_one_msi(pci_priv, &num_vectors)) {
5560 cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d",
5561 msi_config->total_vectors, num_vectors);
5562 /* Propagate the allocation error when no usable vector was obtained */
5563 ret = num_vectors >= 0 ? -EINVAL : num_vectors;
5564 goto reset_msi_config;
5565 }
5566
5567 /* With VT-d disabled on an x86 platform, only one PCI IRQ vector is
5568 * allocated. On suspend, the IRQ may be migrated to CPU0 if it was
5569 * affine to another CPU, and a new MSI vector is re-allocated on
5570 * resume. This was observed to leave the vector without an IRQ
5571 * handler after resume.
5572 * The fix is to set the IRQ vector affinity to CPU0 before calling
5573 * request_irq, to avoid the IRQ migration.
5574 */
5575 if (cnss_pci_is_one_msi(pci_priv)) {
5576 ret = cnss_pci_irq_set_affinity_hint(pci_priv,
5577 0,
5578 cpumask_of(0));
5579 if (ret) {
5580 cnss_pr_err("Failed to affinize irq vector to CPU0\n");
5581 goto free_msi_vector;
5582 }
5583 }
5584
5585 if (cnss_pci_config_msi_addr(pci_priv)) {
5586 ret = -EINVAL;
5587 goto free_msi_vector;
5588 }
5589
5590 if (cnss_pci_config_msi_data(pci_priv)) {
5591 ret = -EINVAL;
5592 goto free_msi_vector;
5593 }
5594
5595 return 0;
5596
5597 free_msi_vector:
5598 if (cnss_pci_is_one_msi(pci_priv))
5599 cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5600 pci_free_irq_vectors(pci_priv->pci_dev);
5601 reset_msi_config:
5602 pci_priv->msi_config = NULL;
5603 out:
5604 return ret;
5605 }
5606
5607 static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
5608 {
5609 if (pci_priv->device_id == QCA6174_DEVICE_ID)
5610 return;
5611
5612 if (cnss_pci_is_one_msi(pci_priv))
5613 cnss_pci_irq_set_affinity_hint(pci_priv, 0, NULL);
5614
5615 pci_free_irq_vectors(pci_priv->pci_dev);
5616 }
5617
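/**
 * cnss_get_user_msi_assignment() - Look up the MSI block for a user
 * @dev: PCI device
 * @user_name: MSI user name from the per-chip table (e.g. MHI_MSI_NAME)
 * @num_vectors: set to the number of vectors reserved for this user
 * @user_base_data: set to the user's base vector plus the endpoint's
 * MSI base data value
 * @base_vector: set to the user's base vector index
 *
 * Illustrative call (caller-side names are hypothetical):
 *
 *	int nvec;
 *	u32 base_data, base_vec;
 *
 *	if (!cnss_get_user_msi_assignment(dev, MHI_MSI_NAME, &nvec,
 *					  &base_data, &base_vec))
 *		request_irqs_for_vectors(base_vec, nvec); // hypothetical
 *
 * Return: 0 on success, negative errno otherwise.
 */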
5618 int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
5619 int *num_vectors, u32 *user_base_data,
5620 u32 *base_vector)
5621 {
5622 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5623 struct cnss_msi_config *msi_config;
5624 int idx;
5625
5626 if (!pci_priv)
5627 return -ENODEV;
5628
5629 msi_config = pci_priv->msi_config;
5630 if (!msi_config) {
5631 cnss_pr_err("MSI is not supported.\n");
5632 return -EINVAL;
5633 }
5634
5635 for (idx = 0; idx < msi_config->total_users; idx++) {
5636 if (strcmp(user_name, msi_config->users[idx].name) == 0) {
5637 *num_vectors = msi_config->users[idx].num_vectors;
5638 *user_base_data = msi_config->users[idx].base_vector
5639 + pci_priv->msi_ep_base_data;
5640 *base_vector = msi_config->users[idx].base_vector;
5641 /* Add only a single print for each user */
5642 if (print_optimize.msi_log_chk[idx]++)
5643 goto skip_print;
5644
5645 cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
5646 user_name, *num_vectors, *user_base_data,
5647 *base_vector);
5648 skip_print:
5649 return 0;
5650 }
5651 }
5652
5653 cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
5654
5655 return -EINVAL;
5656 }
5657 EXPORT_SYMBOL(cnss_get_user_msi_assignment);
5658
5659 int cnss_get_msi_irq(struct device *dev, unsigned int vector)
5660 {
5661 struct pci_dev *pci_dev = to_pci_dev(dev);
5662 int irq_num;
5663
5664 irq_num = pci_irq_vector(pci_dev, vector);
5665 cnss_pr_dbg("Get IRQ number %d for vector index %d\n", irq_num, vector);
5666
5667 return irq_num;
5668 }
5669 EXPORT_SYMBOL(cnss_get_msi_irq);
5670
5671 bool cnss_is_one_msi(struct device *dev)
5672 {
5673 struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
5674
5675 if (!pci_priv)
5676 return false;
5677
5678 return cnss_pci_is_one_msi(pci_priv);
5679 }
5680 EXPORT_SYMBOL(cnss_is_one_msi);
5681
5682 void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
5683 u32 *msi_addr_high)
5684 {
5685 struct pci_dev *pci_dev = to_pci_dev(dev);
5686 struct cnss_pci_data *pci_priv;
5687 u16 control;
5688
5689 if (!pci_dev)
5690 return;
5691
5692 pci_priv = cnss_get_pci_priv(pci_dev);
5693 if (!pci_priv)
5694 return;
5695
5696 if (pci_dev->msix_enabled) {
5697 *msi_addr_low = pci_priv->msix_addr;
5698 *msi_addr_high = 0;
5699 if (!print_optimize.msi_addr_chk++)
5700 cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5701 *msi_addr_low, *msi_addr_high);
5702 return;
5703 }
5704
5705 pci_read_config_word(pci_dev, pci_dev->msi_cap + PCI_MSI_FLAGS,
5706 &control);
5707 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
5708 msi_addr_low);
5709 /* Return MSI high address only when device supports 64-bit MSI */
5710 if (control & PCI_MSI_FLAGS_64BIT)
5711 pci_read_config_dword(pci_dev,
5712 pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
5713 msi_addr_high);
5714 else
5715 *msi_addr_high = 0;
5716 /* Add only a single print as the address is constant */
5717 if (!print_optimize.msi_addr_chk++)
5718 cnss_pr_dbg("Get MSI low addr = 0x%x, high addr = 0x%x\n",
5719 *msi_addr_low, *msi_addr_high);
5720 }
5721 EXPORT_SYMBOL(cnss_get_msi_address);
5722
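/* Return the MSI data value assigned to the WAKE vector, or 0 when no
 * WAKE MSI is available for this chip.
 */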
5723 u32 cnss_pci_get_wake_msi(struct cnss_pci_data *pci_priv)
5724 {
5725 int ret, num_vectors;
5726 u32 user_base_data, base_vector;
5727
5728 if (!pci_priv)
5729 return 0; /* 0 denotes no valid WAKE MSI */
5730
5731 ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
5732 WAKE_MSI_NAME, &num_vectors,
5733 &user_base_data, &base_vector);
5734 if (ret) {
5735 cnss_pr_err("WAKE MSI is not valid\n");
5736 return 0;
5737 }
5738
5739 return user_base_data;
5740 }
5741
5742 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
5743 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5744 {
5745 return dma_set_mask(&pci_dev->dev, mask);
5746 }
5747
5748 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5749 u64 mask)
5750 {
5751 return dma_set_coherent_mask(&pci_dev->dev, mask);
5752 }
5753 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5754 static inline int cnss_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
5755 {
5756 return pci_set_dma_mask(pci_dev, mask);
5757 }
5758
5759 static inline int cnss_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
5760 u64 mask)
5761 {
5762 return pci_set_consistent_dma_mask(pci_dev, mask);
5763 }
5764 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
5765
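/* Bring up PCI for the device: verify the device ID from config space,
 * enable the device, claim and map BAR 0, program the per-chip DMA
 * masks, save the default config space (captured before bus mastering
 * is enabled) and finally enable bus mastering.
 */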
5766 static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
5767 {
5768 int ret = 0;
5769 struct pci_dev *pci_dev = pci_priv->pci_dev;
5770 u16 device_id;
5771
5772 pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
5773 if (device_id != pci_priv->pci_device_id->device) {
5774 cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
5775 device_id, pci_priv->pci_device_id->device);
5776 ret = -EIO;
5777 goto out;
5778 }
5779
5780 ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
5781 if (ret) {
5782 pr_err("Failed to assign PCI resource, err = %d\n", ret);
5783 goto out;
5784 }
5785
5786 ret = pci_enable_device(pci_dev);
5787 if (ret) {
5788 cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
5789 goto out;
5790 }
5791
5792 ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
5793 if (ret) {
5794 cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
5795 goto disable_device;
5796 }
5797
5798 switch (device_id) {
5799 case QCA6174_DEVICE_ID:
5800 case QCN7605_DEVICE_ID:
5801 pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5802 break;
5803 case QCA6390_DEVICE_ID:
5804 case QCA6490_DEVICE_ID:
5805 case KIWI_DEVICE_ID:
5806 case MANGO_DEVICE_ID:
5807 case PEACH_DEVICE_ID:
5808 pci_priv->dma_bit_mask = PCI_DMA_MASK_36_BIT;
5809 break;
5810 default:
5811 pci_priv->dma_bit_mask = PCI_DMA_MASK_32_BIT;
5812 break;
5813 }
5814
5815 cnss_pr_dbg("Set PCI DMA MASK (0x%llx)\n", pci_priv->dma_bit_mask);
5816
5817 ret = cnss_pci_set_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5818 if (ret) {
5819 cnss_pr_err("Failed to set PCI DMA mask, err = %d\n", ret);
5820 goto release_region;
5821 }
5822
5823 ret = cnss_pci_set_coherent_dma_mask(pci_dev, pci_priv->dma_bit_mask);
5824 if (ret) {
5825 cnss_pr_err("Failed to set PCI coherent DMA mask, err = %d\n",
5826 ret);
5827 goto release_region;
5828 }
5829
5830 pci_priv->bar = pci_iomap(pci_dev, PCI_BAR_NUM, 0);
5831 if (!pci_priv->bar) {
5832 cnss_pr_err("Failed to do PCI IO map!\n");
5833 ret = -EIO;
5834 goto release_region;
5835 }
5836
5837 /* Save default config space without BME enabled */
5838 pci_save_state(pci_dev);
5839 pci_priv->default_state = pci_store_saved_state(pci_dev);
5840
5841 pci_set_master(pci_dev);
5842
5843 return 0;
5844
5845 release_region:
5846 pci_release_region(pci_dev, PCI_BAR_NUM);
5847 disable_device:
5848 pci_disable_device(pci_dev);
5849 out:
5850 return ret;
5851 }
5852
5853 static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
5854 {
5855 struct pci_dev *pci_dev = pci_priv->pci_dev;
5856
5857 pci_clear_master(pci_dev);
5858 pci_load_and_free_saved_state(pci_dev, &pci_priv->saved_state);
5859 pci_load_and_free_saved_state(pci_dev, &pci_priv->default_state);
5860
5861 if (pci_priv->bar) {
5862 pci_iounmap(pci_dev, pci_priv->bar);
5863 pci_priv->bar = NULL;
5864 }
5865
5866 pci_release_region(pci_dev, PCI_BAR_NUM);
5867 if (pci_is_enabled(pci_dev))
5868 pci_disable_device(pci_dev);
5869 }
5870
5871 static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
5872 {
5873 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
5874 int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
5875 gfp_t gfp = GFP_KERNEL;
5876 u32 reg_offset;
5877
5878 if (in_interrupt() || irqs_disabled())
5879 gfp = GFP_ATOMIC;
5880
5881 if (!plat_priv->qdss_reg) {
5882 plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
5883 sizeof(*plat_priv->qdss_reg)
5884 * array_size, gfp);
5885 if (!plat_priv->qdss_reg)
5886 return;
5887 }
5888
5889 cnss_pr_dbg("Start to dump qdss registers\n");
5890
5891 for (i = 0; qdss_csr[i].name; i++) {
5892 reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
5893 if (cnss_pci_reg_read(pci_priv, reg_offset,
5894 &plat_priv->qdss_reg[i]))
5895 return;
5896 cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
5897 plat_priv->qdss_reg[i]);
5898 }
5899 }
5900
5901 static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
5902 enum cnss_ce_index ce)
5903 {
5904 int i;
5905 u32 ce_base = ce * CE_REG_INTERVAL;
5906 u32 reg_offset, src_ring_base, dst_ring_base, cmn_base, val;
5907
5908 switch (pci_priv->device_id) {
5909 case QCA6390_DEVICE_ID:
5910 src_ring_base = QCA6390_CE_SRC_RING_REG_BASE;
5911 dst_ring_base = QCA6390_CE_DST_RING_REG_BASE;
5912 cmn_base = QCA6390_CE_COMMON_REG_BASE;
5913 break;
5914 case QCA6490_DEVICE_ID:
5915 src_ring_base = QCA6490_CE_SRC_RING_REG_BASE;
5916 dst_ring_base = QCA6490_CE_DST_RING_REG_BASE;
5917 cmn_base = QCA6490_CE_COMMON_REG_BASE;
5918 break;
5919 default:
5920 return;
5921 }
5922
5923 switch (ce) {
5924 case CNSS_CE_09:
5925 case CNSS_CE_10:
5926 for (i = 0; ce_src[i].name; i++) {
5927 reg_offset = src_ring_base + ce_base + ce_src[i].offset;
5928 if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5929 return;
5930 cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5931 ce, ce_src[i].name, reg_offset, val);
5932 }
5933
5934 for (i = 0; ce_dst[i].name; i++) {
5935 reg_offset = dst_ring_base + ce_base + ce_dst[i].offset;
5936 if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5937 return;
5938 cnss_pr_dbg("CE_%02d_%s[0x%x] = 0x%x\n",
5939 ce, ce_dst[i].name, reg_offset, val);
5940 }
5941 break;
5942 case CNSS_CE_COMMON:
5943 for (i = 0; ce_cmn[i].name; i++) {
5944 reg_offset = cmn_base + ce_cmn[i].offset;
5945 if (cnss_pci_reg_read(pci_priv, reg_offset, &val))
5946 return;
5947 cnss_pr_dbg("CE_COMMON_%s[0x%x] = 0x%x\n",
5948 ce_cmn[i].name, reg_offset, val);
5949 }
5950 break;
5951 default:
5952 cnss_pr_err("Unsupported CE[%d] registers dump\n", ce);
5953 }
5954 }
5955
5956 static void cnss_pci_dump_debug_reg(struct cnss_pci_data *pci_priv)
5957 {
5958 if (cnss_pci_check_link_status(pci_priv))
5959 return;
5960
5961 cnss_pr_dbg("Start to dump debug registers\n");
5962
5963 cnss_mhi_debug_reg_dump(pci_priv);
5964 cnss_pci_bhi_debug_reg_dump(pci_priv);
5965 cnss_pci_soc_scratch_reg_dump(pci_priv);
5966 cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_COMMON);
5967 cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_09);
5968 cnss_pci_dump_ce_reg(pci_priv, CNSS_CE_10);
5969 }
5970
5971 static int cnss_pci_assert_host_sol(struct cnss_pci_data *pci_priv)
5972 {
5973 int ret;
5974
5975 ret = cnss_get_host_sol_value(pci_priv->plat_priv);
5976 if (ret) {
5977 if (ret < 0) {
5978 cnss_pr_dbg("Host SOL functionality is not enabled\n");
5979 return ret;
5980 } else {
5981 cnss_pr_dbg("Host SOL is already high\n");
5982 /*
5983 * Return success if host SOL is already high.
5984 * This indicates to the caller that host SOL is
5985 * already asserted from some other thread and no
5986 * further action is required from the caller.
5987 */
5988 return 0;
5989 }
5990 }
5991
5992 cnss_pr_dbg("Assert host SOL GPIO to retry RDDM, expecting link down\n");
5993 cnss_set_host_sol_value(pci_priv->plat_priv, 1);
5994
5995 return 0;
5996 }
5997
5998 static void cnss_pci_mhi_reg_dump(struct cnss_pci_data *pci_priv)
5999 {
6000 if (!cnss_pci_check_link_status(pci_priv))
6001 cnss_mhi_debug_reg_dump(pci_priv);
6002
6003 cnss_pci_bhi_debug_reg_dump(pci_priv);
6004 cnss_pci_soc_scratch_reg_dump(pci_priv);
6005 cnss_pci_dump_misc_reg(pci_priv);
6006 cnss_pci_dump_shadow_reg(pci_priv);
6007 }
6008
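/* Recover from a PCIe link down: suspend and resume the link, then
 * poll the MHI execution environment for RDDM so a RAM dump can be
 * collected. If the device never enters RDDM, assert host SOL or fall
 * back to timeout-based recovery.
 */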
6009 int cnss_pci_recover_link_down(struct cnss_pci_data *pci_priv)
6010 {
6011 int ret;
6012 int retry = 0;
6013 enum mhi_ee_type mhi_ee;
6014
6015 switch (pci_priv->device_id) {
6016 case QCA6390_DEVICE_ID:
6017 case QCA6490_DEVICE_ID:
6018 case KIWI_DEVICE_ID:
6019 case MANGO_DEVICE_ID:
6020 case PEACH_DEVICE_ID:
6021 break;
6022 default:
6023 return -EOPNOTSUPP;
6024 }
6025
6026 /* Always wait here to avoid missing WAKE assert for RDDM
6027 * before link recovery
6028 */
6029 ret = wait_for_completion_timeout(&pci_priv->wake_event_complete,
6030 msecs_to_jiffies(WAKE_EVENT_TIMEOUT));
6031 if (!ret)
6032 cnss_pr_err("Timeout waiting for wake event after link down\n");
6033
6034 ret = cnss_suspend_pci_link(pci_priv);
6035 if (ret)
6036 cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
6037
6038 ret = cnss_resume_pci_link(pci_priv);
6039 if (ret) {
6040 cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
6041 del_timer(&pci_priv->dev_rddm_timer);
6042 return ret;
6043 }
6044
6045 retry:
6046 /*
6047 * After the PCIe link resumes, a delay of 20 to 400 ms is
6048 * observed before the device moves to RDDM.
6049 */
6050 msleep(RDDM_LINK_RECOVERY_RETRY_DELAY_MS);
6051 mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
6052 if (mhi_ee == MHI_EE_RDDM) {
6053 del_timer(&pci_priv->dev_rddm_timer);
6054 cnss_pr_info("Device in RDDM after link recovery, try to collect dump\n");
6055 cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6056 CNSS_REASON_RDDM);
6057 return 0;
6058 } else if (retry++ < RDDM_LINK_RECOVERY_RETRY) {
6059 cnss_pr_dbg("Wait for RDDM after link recovery, retry #%d, Device EE: %d\n",
6060 retry, mhi_ee);
6061 goto retry;
6062 }
6063
6064 if (!cnss_pci_assert_host_sol(pci_priv))
6065 return 0;
6066 cnss_mhi_debug_reg_dump(pci_priv);
6067 cnss_pci_bhi_debug_reg_dump(pci_priv);
6068 cnss_pci_soc_scratch_reg_dump(pci_priv);
6069 cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6070 CNSS_REASON_TIMEOUT);
6071 return 0;
6072 }
6073
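/* Force a firmware assert in order to collect RDDM. Escalation order:
 * MHI SYS_ERR (RDDM trigger), then the host SOL GPIO, then a host
 * reset request as the last resort.
 */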
6074 int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
6075 {
6076 int ret;
6077 struct cnss_plat_data *plat_priv;
6078
6079 if (!pci_priv)
6080 return -ENODEV;
6081
6082 plat_priv = pci_priv->plat_priv;
6083 if (!plat_priv)
6084 return -ENODEV;
6085
6086 if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
6087 test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state))
6088 return -EINVAL;
6089 /*
6090 * Call pm_runtime_get_sync instead of auto_resume to get a
6091 * reference and make sure runtime_suspend won't get called.
6092 */
6093 ret = cnss_pci_pm_runtime_get_sync(pci_priv, RTPM_ID_CNSS);
6094 if (ret < 0)
6095 goto runtime_pm_put;
6096 /*
6097 * In some scenarios, cnss_pci_pm_runtime_get_sync might not
6098 * resume the PCI bus. For those cases, do an auto resume.
6099 */
6100 cnss_auto_resume(&pci_priv->pci_dev->dev);
6101
6102 if (!pci_priv->is_smmu_fault)
6103 cnss_pci_mhi_reg_dump(pci_priv);
6104
6105 /* If link is still down here, directly trigger link down recovery */
6106 ret = cnss_pci_check_link_status(pci_priv);
6107 if (ret) {
6108 cnss_pci_link_down(&pci_priv->pci_dev->dev);
6109 cnss_pci_pm_runtime_mark_last_busy(pci_priv);
6110 cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
6111 return 0;
6112 }
6113
6114 /*
6115 * First try MHI SYS_ERR; if that fails, try host SOL and return.
6116 * If SOL is not enabled, try a host reset request after the MHI
6117 * SYS_ERR attempt fails.
6118 */
6119 ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
6120 if (ret) {
6121 if (pci_priv->is_smmu_fault) {
6122 cnss_pci_mhi_reg_dump(pci_priv);
6123 pci_priv->is_smmu_fault = false;
6124 }
6125 if (!test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) ||
6126 test_bit(CNSS_MHI_POWERING_OFF, &pci_priv->mhi_state)) {
6127 cnss_pr_dbg("MHI is not powered on, ignore RDDM failure\n");
6128 cnss_pci_pm_runtime_mark_last_busy(pci_priv);
6129 cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
6130 return 0;
6131 }
6132 cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
6133 if (!cnss_pci_assert_host_sol(pci_priv)) {
6134 cnss_pci_pm_runtime_mark_last_busy(pci_priv);
6135 cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
6136 return 0;
6137 }
6138
6139 cnss_pr_dbg("Sending Host Reset Req\n");
6140 if (!cnss_mhi_force_reset(pci_priv)) {
6141 ret = 0;
6142 goto mhi_reg_dump;
6143 }
6144
6145 cnss_pci_dump_debug_reg(pci_priv);
6146 cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6147 CNSS_REASON_DEFAULT);
6148 ret = 0;
6149 goto runtime_pm_put;
6150 }
6151
6152 mhi_reg_dump:
6153 if (pci_priv->is_smmu_fault) {
6154 cnss_pci_mhi_reg_dump(pci_priv);
6155 pci_priv->is_smmu_fault = false;
6156 }
6157
6158 if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
6159 mod_timer(&pci_priv->dev_rddm_timer,
6160 jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
6161 }
6162
6163 runtime_pm_put:
6164 cnss_pci_pm_runtime_mark_last_busy(pci_priv);
6165 cnss_pci_pm_runtime_put_autosuspend(pci_priv, RTPM_ID_CNSS);
6166 return ret;
6167 }
6168
6169 static void cnss_pci_add_dump_seg(struct cnss_pci_data *pci_priv,
6170 struct cnss_dump_seg *dump_seg,
6171 enum cnss_fw_dump_type type, int seg_no,
6172 void *va, dma_addr_t dma, size_t size)
6173 {
6174 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6175 struct device *dev = &pci_priv->pci_dev->dev;
6176 phys_addr_t pa;
6177
6178 dump_seg->address = dma;
6179 dump_seg->v_address = va;
6180 dump_seg->size = size;
6181 dump_seg->type = type;
6182
6183 cnss_pr_dbg("Seg: %x, va: %pK, dma: %pa, size: 0x%zx\n",
6184 seg_no, va, &dma, size);
6185
6186 if (type == CNSS_FW_CAL || cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS))
6187 return;
6188
6189 cnss_minidump_add_region(plat_priv, type, seg_no, va, pa, size);
6190 }
6191
6192 static void cnss_pci_remove_dump_seg(struct cnss_pci_data *pci_priv,
6193 struct cnss_dump_seg *dump_seg,
6194 enum cnss_fw_dump_type type, int seg_no,
6195 void *va, dma_addr_t dma, size_t size)
6196 {
6197 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6198 struct device *dev = &pci_priv->pci_dev->dev;
6199 phys_addr_t pa;
6200
6201 cnss_va_to_pa(dev, size, va, dma, &pa, DMA_ATTR_FORCE_CONTIGUOUS);
6202 cnss_minidump_remove_region(plat_priv, type, seg_no, va, pa, size);
6203 }
6204
6205 int cnss_pci_call_driver_uevent(struct cnss_pci_data *pci_priv,
6206 enum cnss_driver_status status, void *data)
6207 {
6208 struct cnss_uevent_data uevent_data;
6209 struct cnss_wlan_driver *driver_ops;
6210
6211 driver_ops = pci_priv->driver_ops;
6212 if (!driver_ops || !driver_ops->update_event) {
6213 cnss_pr_dbg("Hang event driver ops is NULL\n");
6214 return -EINVAL;
6215 }
6216
6217 cnss_pr_dbg("Calling driver uevent: %d\n", status);
6218
6219 uevent_data.status = status;
6220 uevent_data.data = data;
6221
6222 return driver_ops->update_event(pci_priv->pci_dev, &uevent_data);
6223 }
6224
6225 static void cnss_pci_send_hang_event(struct cnss_pci_data *pci_priv)
6226 {
6227 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6228 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6229 struct cnss_hang_event hang_event;
6230 void *hang_data_va = NULL;
6231 u64 offset = 0;
6232 u16 length = 0;
6233 int i = 0;
6234
6235 if (!fw_mem || !plat_priv->fw_mem_seg_len)
6236 return;
6237
6238 memset(&hang_event, 0, sizeof(hang_event));
6239 switch (pci_priv->device_id) {
6240 case QCA6390_DEVICE_ID:
6241 offset = HST_HANG_DATA_OFFSET;
6242 length = HANG_DATA_LENGTH;
6243 break;
6244 case QCA6490_DEVICE_ID:
6245 /* Fallback to hard-coded values if hang event params not
6246 * present in QMI. Once all the firmware branches have the
6247 * fix to send params over QMI, this can be removed.
6248 */
6249 if (plat_priv->hang_event_data_len) {
6250 offset = plat_priv->hang_data_addr_offset;
6251 length = plat_priv->hang_event_data_len;
6252 } else {
6253 offset = HSP_HANG_DATA_OFFSET;
6254 length = HANG_DATA_LENGTH;
6255 }
6256 break;
6257 case KIWI_DEVICE_ID:
6258 case MANGO_DEVICE_ID:
6259 case PEACH_DEVICE_ID:
6260 offset = plat_priv->hang_data_addr_offset;
6261 length = plat_priv->hang_event_data_len;
6262 break;
6263 case QCN7605_DEVICE_ID:
6264 offset = GNO_HANG_DATA_OFFSET;
6265 length = HANG_DATA_LENGTH;
6266 break;
6267 default:
6268 cnss_pr_err("Skip Hang Event Data as unsupported Device ID received: %d\n",
6269 pci_priv->device_id);
6270 return;
6271 }
6272
6273 for (i = 0; i < plat_priv->fw_mem_seg_len; i++) {
6274 if (fw_mem[i].type == QMI_WLFW_MEM_TYPE_DDR_V01 &&
6275 fw_mem[i].va) {
6276 /* The offset must not exceed (fw_mem size - hang data length) */
6277 if (offset > fw_mem[i].size - length)
6278 goto exit;
6279
6280 hang_data_va = fw_mem[i].va + offset;
6281 hang_event.hang_event_data = kmemdup(hang_data_va,
6282 length,
6283 GFP_ATOMIC);
6284 if (!hang_event.hang_event_data) {
6285 cnss_pr_dbg("Hang data memory alloc failed\n");
6286 return;
6287 }
6288 hang_event.hang_event_data_len = length;
6289 break;
6290 }
6291 }
6292
6293 cnss_pci_call_driver_uevent(pci_priv, CNSS_HANG_EVENT, &hang_event);
6294
6295 kfree(hang_event.hang_event_data);
6296 hang_event.hang_event_data = NULL;
6297 return;
6298 exit:
6299 cnss_pr_dbg("Invalid hang event params, offset:0x%x, length:0x%x\n",
6300 plat_priv->hang_data_addr_offset,
6301 plat_priv->hang_event_data_len);
6302 }
6303
6304 #ifdef CONFIG_CNSS2_SSR_DRIVER_DUMP
6305 void cnss_pci_collect_host_dump_info(struct cnss_pci_data *pci_priv)
6306 {
6307 struct cnss_ssr_driver_dump_entry *ssr_entry;
6308 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6309 size_t num_entries_loaded = 0;
6310 int x;
6311 int ret = -1;
6312
6313 ssr_entry = kmalloc_array(CNSS_HOST_DUMP_TYPE_MAX, sizeof(*ssr_entry), GFP_KERNEL);
6314 if (!ssr_entry) {
6315 cnss_pr_err("ssr_entry malloc failed");
6316 return;
6317 }
6318
6319 if (pci_priv->driver_ops &&
6320 pci_priv->driver_ops->collect_driver_dump) {
6321 ret = pci_priv->driver_ops->collect_driver_dump(pci_priv->pci_dev,
6322 ssr_entry,
6323 &num_entries_loaded);
6324 }
6325
6326 if (!ret) {
6327 for (x = 0; x < num_entries_loaded; x++) {
6328 cnss_pr_info("Idx:%d, ptr: %p, name: %s, size: %d\n",
6329 x, ssr_entry[x].buffer_pointer,
6330 ssr_entry[x].region_name,
6331 ssr_entry[x].buffer_size);
6332 }
6333
6334 cnss_do_host_ramdump(plat_priv, ssr_entry, num_entries_loaded);
6335 } else {
6336 cnss_pr_info("Host SSR elf dump collection feature disabled\n");
6337 }
6338
6339 kfree(ssr_entry);
6340 }
6341 #endif
6342
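/* Collect the crash dump: download the RDDM image over MHI and
 * register the FW image, RDDM and eligible FW memory segments with
 * the ramdump infrastructure. @in_panic selects the panic-safe RDDM
 * download path and halves the MHI timeout to avoid a hypervisor
 * watchdog bite.
 */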
6343 void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
6344 {
6345 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6346 struct cnss_dump_data *dump_data =
6347 &plat_priv->ramdump_info_v2.dump_data;
6348 struct cnss_dump_seg *dump_seg =
6349 plat_priv->ramdump_info_v2.dump_data_vaddr;
6350 struct image_info *fw_image, *rddm_image;
6351 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6352 int ret, i, j;
6353
6354 if (test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
6355 !test_bit(CNSS_IN_PANIC, &plat_priv->driver_state))
6356 cnss_pci_send_hang_event(pci_priv);
6357
6358 if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
6359 cnss_pr_dbg("RAM dump is already collected, skip\n");
6360 return;
6361 }
6362
6363 if (!cnss_is_device_powered_on(plat_priv)) {
6364 cnss_pr_dbg("Device is already powered off, skip\n");
6365 return;
6366 }
6367
6368 if (!in_panic) {
6369 mutex_lock(&pci_priv->bus_lock);
6370 ret = cnss_pci_check_link_status(pci_priv);
6371 if (ret) {
6372 if (ret != -EACCES) {
6373 mutex_unlock(&pci_priv->bus_lock);
6374 return;
6375 }
6376 if (cnss_pci_resume_bus(pci_priv)) {
6377 mutex_unlock(&pci_priv->bus_lock);
6378 return;
6379 }
6380 }
6381 mutex_unlock(&pci_priv->bus_lock);
6382 } else {
6383 if (cnss_pci_check_link_status(pci_priv))
6384 return;
6385 /* Inside panic handler, reduce timeout for RDDM to avoid
6386 * unnecessary hypervisor watchdog bite.
6387 */
6388 pci_priv->mhi_ctrl->timeout_ms /= 2;
6389 }
6390
6391 cnss_mhi_debug_reg_dump(pci_priv);
6392 cnss_pci_bhi_debug_reg_dump(pci_priv);
6393 cnss_pci_soc_scratch_reg_dump(pci_priv);
6394 cnss_pci_dump_misc_reg(pci_priv);
6395 cnss_rddm_trigger_debug(pci_priv);
6396 ret = mhi_download_rddm_image(pci_priv->mhi_ctrl, in_panic);
6397 if (ret) {
6398 cnss_fatal_err("Failed to download RDDM image, err = %d\n",
6399 ret);
6400 if (!cnss_pci_assert_host_sol(pci_priv))
6401 return;
6402 cnss_rddm_trigger_check(pci_priv);
6403 cnss_pci_dump_debug_reg(pci_priv);
6404 return;
6405 }
6406 cnss_rddm_trigger_check(pci_priv);
6407 fw_image = pci_priv->mhi_ctrl->fbc_image;
6408 rddm_image = pci_priv->mhi_ctrl->rddm_image;
6409 dump_data->nentries = 0;
6410
6411 if (plat_priv->qdss_mem_seg_len)
6412 cnss_pci_dump_qdss_reg(pci_priv);
6413 cnss_mhi_dump_sfr(pci_priv);
6414
6415 if (!dump_seg) {
6416 cnss_pr_warn("FW image dump collection not setup");
6417 goto skip_dump;
6418 }
6419
6420 cnss_pr_dbg("Collect FW image dump segment, nentries %d\n",
6421 fw_image->entries);
6422
6423 for (i = 0; i < fw_image->entries; i++) {
6424 cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6425 fw_image->mhi_buf[i].buf,
6426 fw_image->mhi_buf[i].dma_addr,
6427 fw_image->mhi_buf[i].len);
6428 dump_seg++;
6429 }
6430
6431 dump_data->nentries += fw_image->entries;
6432
6433 cnss_pr_dbg("Collect RDDM image dump segment, nentries %d\n",
6434 rddm_image->entries);
6435
6436 for (i = 0; i < rddm_image->entries; i++) {
6437 cnss_pci_add_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6438 rddm_image->mhi_buf[i].buf,
6439 rddm_image->mhi_buf[i].dma_addr,
6440 rddm_image->mhi_buf[i].len);
6441 dump_seg++;
6442 }
6443
6444 dump_data->nentries += rddm_image->entries;
6445
6446 for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6447 if (fw_mem[i].type == CNSS_MEM_TYPE_DDR) {
6448 if (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
6449 cnss_pr_dbg("Collect remote heap dump segment\n");
6450 cnss_pci_add_dump_seg(pci_priv, dump_seg,
6451 CNSS_FW_REMOTE_HEAP, j,
6452 fw_mem[i].va,
6453 fw_mem[i].pa,
6454 fw_mem[i].size);
6455 dump_seg++;
6456 dump_data->nentries++;
6457 j++;
6458 } else {
6459 cnss_pr_dbg("Skip remote heap dumps as it is non-contiguous\n");
6460 }
6461 } else if (fw_mem[i].type == CNSS_MEM_CAL_V01) {
6462 cnss_pr_dbg("Collect CAL memory dump segment\n");
6463 cnss_pci_add_dump_seg(pci_priv, dump_seg,
6464 CNSS_FW_CAL, j,
6465 fw_mem[i].va,
6466 fw_mem[i].pa,
6467 fw_mem[i].size);
6468 dump_seg++;
6469 dump_data->nentries++;
6470 j++;
6471 }
6472 }
6473
6474 if (dump_data->nentries > 0)
6475 plat_priv->ramdump_info_v2.dump_data_valid = true;
6476
6477 cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);
6478
6479 skip_dump:
6480 complete(&plat_priv->rddm_complete);
6481 }
6482
6483 void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
6484 {
6485 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6486 struct cnss_dump_seg *dump_seg =
6487 plat_priv->ramdump_info_v2.dump_data_vaddr;
6488 struct image_info *fw_image, *rddm_image;
6489 struct cnss_fw_mem *fw_mem = plat_priv->fw_mem;
6490 int i, j;
6491
6492 if (!dump_seg)
6493 return;
6494
6495 fw_image = pci_priv->mhi_ctrl->fbc_image;
6496 rddm_image = pci_priv->mhi_ctrl->rddm_image;
6497
6498 for (i = 0; i < fw_image->entries; i++) {
6499 cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_IMAGE, i,
6500 fw_image->mhi_buf[i].buf,
6501 fw_image->mhi_buf[i].dma_addr,
6502 fw_image->mhi_buf[i].len);
6503 dump_seg++;
6504 }
6505
6506 for (i = 0; i < rddm_image->entries; i++) {
6507 cnss_pci_remove_dump_seg(pci_priv, dump_seg, CNSS_FW_RDDM, i,
6508 rddm_image->mhi_buf[i].buf,
6509 rddm_image->mhi_buf[i].dma_addr,
6510 rddm_image->mhi_buf[i].len);
6511 dump_seg++;
6512 }
6513
6514 for (i = 0, j = 0; i < plat_priv->fw_mem_seg_len; i++) {
6515 if (fw_mem[i].type == CNSS_MEM_TYPE_DDR &&
6516 (fw_mem[i].attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
6517 cnss_pci_remove_dump_seg(pci_priv, dump_seg,
6518 CNSS_FW_REMOTE_HEAP, j,
6519 fw_mem[i].va, fw_mem[i].pa,
6520 fw_mem[i].size);
6521 dump_seg++;
6522 j++;
6523 } else if (fw_mem[i].type == CNSS_MEM_CAL_V01) {
6524 cnss_pci_remove_dump_seg(pci_priv, dump_seg,
6525 CNSS_FW_CAL, j,
6526 fw_mem[i].va, fw_mem[i].pa,
6527 fw_mem[i].size);
6528 dump_seg++;
6529 j++;
6530 }
6531 }
6532
6533 plat_priv->ramdump_info_v2.dump_data.nentries = 0;
6534 plat_priv->ramdump_info_v2.dump_data_valid = false;
6535 }
6536
6537 void cnss_pci_device_crashed(struct cnss_pci_data *pci_priv)
6538 {
6539 struct cnss_plat_data *plat_priv;
6540
6541 if (!pci_priv) {
6542 cnss_pr_err("pci_priv is NULL\n");
6543 return;
6544 }
6545
6546 plat_priv = pci_priv->plat_priv;
6547 if (!plat_priv) {
6548 cnss_pr_err("plat_priv is NULL\n");
6549 return;
6550 }
6551
6552 if (plat_priv->recovery_enabled)
6553 cnss_pci_collect_host_dump_info(pci_priv);
6554
6555 /* Call the recovery handler in the DRIVER_RECOVERY event context
6556 * instead of scheduling work. That way the complete recovery is
6557 * done as part of the DRIVER_RECOVERY event and gets serialized
6558 * with other events.
6559 */
6560 cnss_recovery_handler(plat_priv);
6561 }
6562
6563 static int cnss_mhi_pm_runtime_get(struct mhi_controller *mhi_ctrl)
6564 {
6565 struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6566
6567 return cnss_pci_pm_runtime_get(pci_priv, RTPM_ID_MHI);
6568 }
6569
6570 static void cnss_mhi_pm_runtime_put_noidle(struct mhi_controller *mhi_ctrl)
6571 {
6572 struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6573
6574 cnss_pci_pm_runtime_put_noidle(pci_priv, RTPM_ID_MHI);
6575 }
6576
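/* Compose a firmware file name into @prefix_name, prepending the
 * per-chip directory prefix when use_fw_path_with_prefix is set;
 * otherwise the name is copied through unchanged.
 */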
6577 void cnss_pci_add_fw_prefix_name(struct cnss_pci_data *pci_priv,
6578 char *prefix_name, char *name)
6579 {
6580 struct cnss_plat_data *plat_priv;
6581
6582 if (!pci_priv)
6583 return;
6584
6585 plat_priv = pci_priv->plat_priv;
6586
6587 if (!plat_priv->use_fw_path_with_prefix) {
6588 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6589 return;
6590 }
6591
6592 switch (pci_priv->device_id) {
6593 case QCN7605_DEVICE_ID:
6594 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6595 QCN7605_PATH_PREFIX "%s", name);
6596 break;
6597 case QCA6390_DEVICE_ID:
6598 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6599 QCA6390_PATH_PREFIX "%s", name);
6600 break;
6601 case QCA6490_DEVICE_ID:
6602 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6603 QCA6490_PATH_PREFIX "%s", name);
6604 break;
6605 case KIWI_DEVICE_ID:
6606 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6607 KIWI_PATH_PREFIX "%s", name);
6608 break;
6609 case MANGO_DEVICE_ID:
6610 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6611 MANGO_PATH_PREFIX "%s", name);
6612 break;
6613 case PEACH_DEVICE_ID:
6614 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN,
6615 PEACH_PATH_PREFIX "%s", name);
6616 break;
6617 default:
6618 scnprintf(prefix_name, MAX_FIRMWARE_NAME_LEN, "%s", name);
6619 break;
6620 }
6621
6622 cnss_pr_dbg("FW name added with prefix: %s\n", prefix_name);
6623 }
6624
6625 static int cnss_pci_update_fw_name(struct cnss_pci_data *pci_priv)
6626 {
6627 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6628
6629 switch (pci_priv->device_id) {
6630 case QCA6390_DEVICE_ID:
6631 if (plat_priv->device_version.major_version < FW_V2_NUMBER) {
6632 cnss_pr_dbg("Device ID:version (0x%lx:%d) is not supported\n",
6633 pci_priv->device_id,
6634 plat_priv->device_version.major_version);
6635 return -EINVAL;
6636 }
6637 cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6638 FW_V2_FILE_NAME);
6639 snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6640 FW_V2_FILE_NAME);
6641 break;
6642 case QCA6490_DEVICE_ID:
6643 case KIWI_DEVICE_ID:
6644 case MANGO_DEVICE_ID:
6645 case PEACH_DEVICE_ID:
6646 switch (plat_priv->device_version.major_version) {
6647 case FW_V2_NUMBER:
6648 cnss_pci_add_fw_prefix_name(pci_priv,
6649 plat_priv->firmware_name,
6650 FW_V2_FILE_NAME);
6651 snprintf(plat_priv->fw_fallback_name,
6652 MAX_FIRMWARE_NAME_LEN,
6653 FW_V2_FILE_NAME);
6654 break;
6655 default:
6656 cnss_pci_add_fw_prefix_name(pci_priv,
6657 plat_priv->firmware_name,
6658 DEFAULT_FW_FILE_NAME);
6659 snprintf(plat_priv->fw_fallback_name,
6660 MAX_FIRMWARE_NAME_LEN,
6661 DEFAULT_FW_FILE_NAME);
6662 break;
6663 }
6664 break;
6665 default:
6666 cnss_pci_add_fw_prefix_name(pci_priv, plat_priv->firmware_name,
6667 DEFAULT_FW_FILE_NAME);
6668 snprintf(plat_priv->fw_fallback_name, MAX_FIRMWARE_NAME_LEN,
6669 DEFAULT_FW_FILE_NAME);
6670 break;
6671 }
6672
6673 cnss_pr_dbg("FW name is %s, FW fallback name is %s\n",
6674 plat_priv->firmware_name, plat_priv->fw_fallback_name);
6675
6676 return 0;
6677 }
6678
6679 static char *cnss_mhi_notify_status_to_str(enum mhi_callback status)
6680 {
6681 switch (status) {
6682 case MHI_CB_IDLE:
6683 return "IDLE";
6684 case MHI_CB_EE_RDDM:
6685 return "RDDM";
6686 case MHI_CB_SYS_ERROR:
6687 return "SYS_ERROR";
6688 case MHI_CB_FATAL_ERROR:
6689 return "FATAL_ERROR";
6690 case MHI_CB_EE_MISSION_MODE:
6691 return "MISSION_MODE";
6692 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6693 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6694 case MHI_CB_FALLBACK_IMG:
6695 return "FW_FALLBACK";
6696 #endif
6697 default:
6698 return "UNKNOWN";
6699 }
6700 }
6701
6702 static void cnss_dev_rddm_timeout_hdlr(struct timer_list *t)
6703 {
6704 struct cnss_pci_data *pci_priv =
6705 from_timer(pci_priv, t, dev_rddm_timer);
6706 enum mhi_ee_type mhi_ee;
6707
6708 if (!pci_priv)
6709 return;
6710
6711 cnss_fatal_err("Timeout waiting for RDDM notification\n");
6712
6713 mhi_ee = mhi_get_exec_env(pci_priv->mhi_ctrl);
6714 if (mhi_ee == MHI_EE_PBL)
6715 cnss_pr_err("Device MHI EE is PBL, unable to collect dump\n");
6716
6717 if (mhi_ee == MHI_EE_RDDM) {
6718 cnss_pr_info("Device MHI EE is RDDM, try to collect dump\n");
6719 cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6720 CNSS_REASON_RDDM);
6721 } else {
6722 if (!cnss_pci_assert_host_sol(pci_priv))
6723 return;
6724 cnss_mhi_debug_reg_dump(pci_priv);
6725 cnss_pci_bhi_debug_reg_dump(pci_priv);
6726 cnss_pci_soc_scratch_reg_dump(pci_priv);
6727 cnss_schedule_recovery(&pci_priv->pci_dev->dev,
6728 CNSS_REASON_TIMEOUT);
6729 }
6730 }
6731
6732 static void cnss_boot_debug_timeout_hdlr(struct timer_list *t)
6733 {
6734 struct cnss_pci_data *pci_priv =
6735 from_timer(pci_priv, t, boot_debug_timer);
6736
6737 if (!pci_priv)
6738 return;
6739
6740 if (cnss_pci_check_link_status(pci_priv))
6741 return;
6742
6743 if (cnss_pci_is_device_down(&pci_priv->pci_dev->dev))
6744 return;
6745
6746 if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
6747 return;
6748
6749 if (cnss_mhi_scan_rddm_cookie(pci_priv, DEVICE_RDDM_COOKIE))
6750 return;
6751
6752 cnss_pr_dbg("Dump MHI/PBL/SBL debug data every %ds during MHI power on\n",
6753 BOOT_DEBUG_TIMEOUT_MS / 1000);
6754 cnss_mhi_debug_reg_dump(pci_priv);
6755 cnss_pci_bhi_debug_reg_dump(pci_priv);
6756 cnss_pci_soc_scratch_reg_dump(pci_priv);
6757 cnss_pci_dump_bl_sram_mem(pci_priv);
6758
6759 mod_timer(&pci_priv->boot_debug_timer,
6760 jiffies + msecs_to_jiffies(BOOT_DEBUG_TIMEOUT_MS));
6761 }
6762
6763 static int cnss_pci_handle_mhi_sys_err(struct cnss_pci_data *pci_priv)
6764 {
6765 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6766
6767 cnss_ignore_qmi_failure(true);
6768 set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6769 del_timer(&plat_priv->fw_boot_timer);
6770 reinit_completion(&pci_priv->wake_event_complete);
6771 mod_timer(&pci_priv->dev_rddm_timer,
6772 jiffies + msecs_to_jiffies(DEV_RDDM_TIMEOUT));
6773 cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6774
6775 return 0;
6776 }
6777
6778 int cnss_pci_handle_dev_sol_irq(struct cnss_pci_data *pci_priv)
6779 {
6780 return cnss_pci_handle_mhi_sys_err(pci_priv);
6781 }
6782
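/* MHI status callback: translate MHI controller callbacks into CNSS
 * recovery actions. FATAL_ERROR and EE_RDDM mark the firmware down and
 * schedule recovery; SYS_ERROR instead arms the RDDM timeout via
 * cnss_pci_handle_mhi_sys_err().
 */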
6783 static void cnss_mhi_notify_status(struct mhi_controller *mhi_ctrl,
6784 enum mhi_callback reason)
6785 {
6786 struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6787 struct cnss_plat_data *plat_priv;
6788 enum cnss_recovery_reason cnss_reason;
6789
6790 if (!pci_priv) {
6791 cnss_pr_err("pci_priv is NULL");
6792 return;
6793 }
6794
6795 plat_priv = pci_priv->plat_priv;
6796
6797 if (reason != MHI_CB_IDLE)
6798 cnss_pr_dbg("MHI status cb is called with reason %s(%d)\n",
6799 cnss_mhi_notify_status_to_str(reason), reason);
6800
6801 switch (reason) {
6802 case MHI_CB_IDLE:
6803 case MHI_CB_EE_MISSION_MODE:
6804 return;
6805 case MHI_CB_FATAL_ERROR:
6806 cnss_ignore_qmi_failure(true);
6807 set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6808 del_timer(&plat_priv->fw_boot_timer);
6809 cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6810 cnss_reason = CNSS_REASON_DEFAULT;
6811 break;
6812 case MHI_CB_SYS_ERROR:
6813 cnss_pci_handle_mhi_sys_err(pci_priv);
6814 return;
6815 case MHI_CB_EE_RDDM:
6816 cnss_ignore_qmi_failure(true);
6817 set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
6818 del_timer(&plat_priv->fw_boot_timer);
6819 del_timer(&pci_priv->dev_rddm_timer);
6820 cnss_pci_update_status(pci_priv, CNSS_FW_DOWN);
6821 cnss_reason = CNSS_REASON_RDDM;
6822 break;
6823 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
6824 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
6825 case MHI_CB_FALLBACK_IMG:
6826 plat_priv->use_fw_path_with_prefix = false;
6827 cnss_pci_update_fw_name(pci_priv);
6828 return;
6829 #endif
6830
6831 default:
6832 cnss_pr_err("Unsupported MHI status cb reason: %d\n", reason);
6833 return;
6834 }
6835
6836 cnss_schedule_recovery(&pci_priv->pci_dev->dev, cnss_reason);
6837 }
6838
6839 static int cnss_pci_get_mhi_msi(struct cnss_pci_data *pci_priv)
6840 {
6841 int ret, num_vectors, i;
6842 u32 user_base_data, base_vector;
6843 int *irq;
6844 unsigned int msi_data;
6845 bool is_one_msi = false;
6846
6847 ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
6848 MHI_MSI_NAME, &num_vectors,
6849 &user_base_data, &base_vector);
6850 if (ret)
6851 return ret;
6852
6853 if (cnss_pci_is_one_msi(pci_priv)) {
6854 is_one_msi = true;
6855 num_vectors = cnss_pci_get_one_msi_mhi_irq_array_size(pci_priv);
6856 }
6857 cnss_pr_dbg("Number of assigned MSI for MHI is %d, base vector is %d\n",
6858 num_vectors, base_vector);
6859
6860 irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
6861 if (!irq)
6862 return -ENOMEM;
6863
6864 for (i = 0; i < num_vectors; i++) {
6865 msi_data = base_vector;
6866 if (!is_one_msi)
6867 msi_data += i;
6868 irq[i] = cnss_get_msi_irq(&pci_priv->pci_dev->dev, msi_data);
6869 }
6870
6871 pci_priv->mhi_ctrl->irq = irq;
6872 pci_priv->mhi_ctrl->nr_irqs = num_vectors;
6873
6874 return 0;
6875 }
6876
6877 static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
6878 struct mhi_link_info *link_info)
6879 {
6880 struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6881 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
6882 int ret = 0;
6883
6884 cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
6885 link_info->target_link_speed,
6886 link_info->target_link_width);
6887
6888 /* The target link speed has to be set before the link bandwidth
6889 * when the device requests a link speed change. This avoids the
6890 * link bandwidth request getting rejected if the requested link
6891 * speed is higher than the current one.
6892 */
6893 ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num,
6894 link_info->target_link_speed);
6895 if (ret)
6896 cnss_pr_err("Failed to set target link speed to 0x%x, err = %d\n",
6897 link_info->target_link_speed, ret);
6898
6899 ret = cnss_pci_set_link_bandwidth(pci_priv,
6900 link_info->target_link_speed,
6901 link_info->target_link_width);
6902
6903 if (ret) {
6904 cnss_pr_err("Failed to set link bandwidth, err = %d\n", ret);
6905 return ret;
6906 }
6907
6908 pci_priv->def_link_speed = link_info->target_link_speed;
6909 pci_priv->def_link_width = link_info->target_link_width;
6910
6911 return 0;
6912 }
6913
6914 static int cnss_mhi_read_reg(struct mhi_controller *mhi_ctrl,
6915 void __iomem *addr, u32 *out)
6916 {
6917 struct cnss_pci_data *pci_priv = dev_get_drvdata(mhi_ctrl->cntrl_dev);
6918
6919 u32 tmp = readl_relaxed(addr);
6920
6921 /* Unexpected value, query the link status */
6922 if (PCI_INVALID_READ(tmp) &&
6923 cnss_pci_check_link_status(pci_priv))
6924 return -EIO;
6925
6926 *out = tmp;
6927
6928 return 0;
6929 }
6930
6931 static void cnss_mhi_write_reg(struct mhi_controller *mhi_ctrl,
6932 void __iomem *addr, u32 val)
6933 {
6934 writel_relaxed(val, addr);
6935 }
6936
6937 #if IS_ENABLED(CONFIG_MHI_BUS_MISC)
6938 /**
6939 * __cnss_get_mhi_soc_info - Get SoC info before registering mhi controller
6940 * @mhi_ctrl: MHI controller
6941 *
6942 * Return: 0 for success, error code on failure
6943 */
6944 static inline int __cnss_get_mhi_soc_info(struct mhi_controller *mhi_ctrl)
6945 {
6946 return mhi_get_soc_info(mhi_ctrl);
6947 }
6948 #else
6949 #define SOC_HW_VERSION_OFFS (0x224)
6950 #define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000)
6951 #define SOC_HW_VERSION_FAM_NUM_SHFT (28)
6952 #define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000)
6953 #define SOC_HW_VERSION_DEV_NUM_SHFT (16)
6954 #define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00)
6955 #define SOC_HW_VERSION_MAJOR_VER_SHFT (8)
6956 #define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
6957 #define SOC_HW_VERSION_MINOR_VER_SHFT (0)
6958
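/* Fallback when mhi_get_soc_info() is not available: read the SoC HW
 * version register through the MHI register window and decode the
 * family/device/major/minor fields.
 */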
6959 static int __cnss_get_mhi_soc_info(struct mhi_controller *mhi_ctrl)
6960 {
6961 u32 soc_info;
6962 int ret;
6963
6964 ret = mhi_ctrl->read_reg(mhi_ctrl,
6965 mhi_ctrl->regs + SOC_HW_VERSION_OFFS,
6966 &soc_info);
6967 if (ret)
6968 return ret;
6969
6970 mhi_ctrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
6971 SOC_HW_VERSION_FAM_NUM_SHFT;
6972 mhi_ctrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
6973 SOC_HW_VERSION_DEV_NUM_SHFT;
6974 mhi_ctrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
6975 SOC_HW_VERSION_MAJOR_VER_SHFT;
6976 mhi_ctrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
6977 SOC_HW_VERSION_MINOR_VER_SHFT;
6978 return 0;
6979 }
6980 #endif
6981
6982 static int cnss_get_mhi_soc_info(struct cnss_plat_data *plat_priv,
6983 struct mhi_controller *mhi_ctrl)
6984 {
6985 int ret = 0;
6986
6987 ret = __cnss_get_mhi_soc_info(mhi_ctrl);
6988 if (ret) {
6989 cnss_pr_err("failed to get mhi soc info, ret %d\n", ret);
6990 goto exit;
6991 }
6992
6993 plat_priv->device_version.family_number = mhi_ctrl->family_number;
6994 plat_priv->device_version.device_number = mhi_ctrl->device_number;
6995 plat_priv->device_version.major_version = mhi_ctrl->major_version;
6996 plat_priv->device_version.minor_version = mhi_ctrl->minor_version;
6997
6998 cnss_pr_dbg("Get device version info, family number: 0x%x, device number: 0x%x, major version: 0x%x, minor version: 0x%x\n",
6999 plat_priv->device_version.family_number,
7000 plat_priv->device_version.device_number,
7001 plat_priv->device_version.major_version,
7002 plat_priv->device_version.minor_version);
7003
7004 /* Only keep lower 4 bits as real device major version */
7005 plat_priv->device_version.major_version &= DEVICE_MAJOR_VERSION_MASK;
7006
7007 exit:
7008 return ret;
7009 }
7010
7011 static bool cnss_is_tme_supported(struct cnss_pci_data *pci_priv)
7012 {
7013 if (!pci_priv) {
7014 cnss_pr_dbg("pci_priv is NULL");
7015 return false;
7016 }
7017
7018 switch (pci_priv->device_id) {
7019 case PEACH_DEVICE_ID:
7020 return true;
7021 default:
7022 return false;
7023 }
7024 }
7025
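/* Register the device with the MHI bus: allocate the controller, wire
 * up BAR, IRQ and IOVA resources plus the register/status callbacks,
 * pick a channel configuration based on chip and version, and resolve
 * the final firmware names. QCA6174 does not use MHI, so only QMI
 * init is performed for it.
 */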
7026 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
7027 {
7028 int ret = 0;
7029 struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
7030 struct pci_dev *pci_dev = pci_priv->pci_dev;
7031 struct mhi_controller *mhi_ctrl;
7032 phys_addr_t bar_start;
7033 const struct mhi_controller_config *cnss_mhi_config =
7034 &cnss_mhi_config_default;
7035
7036 ret = cnss_qmi_init(plat_priv);
7037 if (ret)
7038 return -EINVAL;
7039
7040 if (pci_priv->device_id == QCA6174_DEVICE_ID)
7041 return 0;
7042
7043 mhi_ctrl = mhi_alloc_controller();
7044 if (!mhi_ctrl) {
7045 cnss_pr_err("Invalid MHI controller context\n");
7046 return -EINVAL;
7047 }
7048
7049 pci_priv->mhi_ctrl = mhi_ctrl;
7050 mhi_ctrl->cntrl_dev = &pci_dev->dev;
7051
7052 mhi_ctrl->fw_image = plat_priv->firmware_name;
7053 #if IS_ENABLED(CONFIG_MHI_BUS_MISC) && \
7054 (LINUX_VERSION_CODE < KERNEL_VERSION(6, 2, 0))
7055 mhi_ctrl->fallback_fw_image = plat_priv->fw_fallback_name;
7056 #endif
7057
7058 mhi_ctrl->regs = pci_priv->bar;
7059 mhi_ctrl->reg_len = pci_resource_len(pci_priv->pci_dev, PCI_BAR_NUM);
7060 bar_start = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
7061 cnss_pr_dbg("BAR starts at %pa, length is %x\n",
7062 &bar_start, mhi_ctrl->reg_len);
7063
7064 ret = cnss_pci_get_mhi_msi(pci_priv);
7065 if (ret) {
7066 cnss_pr_err("Failed to get MSI for MHI, err = %d\n", ret);
7067 goto free_mhi_ctrl;
7068 }
7069
7070 if (cnss_pci_is_one_msi(pci_priv))
7071 mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
7072
	if (pci_priv->smmu_s1_enable) {
		mhi_ctrl->iova_start = pci_priv->smmu_iova_start;
		mhi_ctrl->iova_stop = pci_priv->smmu_iova_start +
					pci_priv->smmu_iova_len;
	} else {
		mhi_ctrl->iova_start = 0;
		mhi_ctrl->iova_stop = pci_priv->dma_bit_mask;
	}

	mhi_ctrl->status_cb = cnss_mhi_notify_status;
	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
	mhi_ctrl->read_reg = cnss_mhi_read_reg;
	mhi_ctrl->write_reg = cnss_mhi_write_reg;

	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
	if (!mhi_ctrl->rddm_size)
		mhi_ctrl->rddm_size = RAMDUMP_SIZE_DEFAULT;

	if (plat_priv->device_id == QCN7605_DEVICE_ID)
		mhi_ctrl->sbl_size = SZ_256K;
	else
		mhi_ctrl->sbl_size = SZ_512K;

	mhi_ctrl->seg_len = SZ_512K;
	mhi_ctrl->fbc_download = true;

	ret = cnss_get_mhi_soc_info(plat_priv, mhi_ctrl);
	if (ret)
		goto free_mhi_irq;

	/* Satellite config is only supported on KIWI V2 and later chipsets */
	if (plat_priv->device_id <= QCA6490_DEVICE_ID ||
	    (plat_priv->device_id == KIWI_DEVICE_ID &&
	     plat_priv->device_version.major_version == 1)) {
		if (plat_priv->device_id == QCN7605_DEVICE_ID)
			cnss_mhi_config = &cnss_mhi_config_genoa;
		else
			cnss_mhi_config = &cnss_mhi_config_no_satellite;
	}

	/* DIAG is no longer supported on PEACH and later chipsets */
	if (plat_priv->device_id >= PEACH_DEVICE_ID)
		cnss_mhi_config = &cnss_mhi_config_no_diag;
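	/* Net effect of the two checks above: QCN7605 uses the Genoa config;
	 * other targets at or below QCA6490, plus KIWI v1, drop the satellite
	 * channels; PEACH and later additionally drop DIAG; everything else
	 * keeps the default config.
	 */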

	mhi_ctrl->tme_supported_image = cnss_is_tme_supported(pci_priv);

	ret = mhi_register_controller(mhi_ctrl, cnss_mhi_config);
	if (ret) {
		cnss_pr_err("Failed to register to MHI bus, err = %d\n", ret);
		goto free_mhi_irq;
	}

	/* MHI satellite driver only needs to connect when DRV is supported */
	if (cnss_pci_get_drv_supported(pci_priv))
		cnss_mhi_controller_set_base(pci_priv, bar_start);

	cnss_get_bwscal_info(plat_priv);
	cnss_pr_dbg("no_bwscale: %d\n", plat_priv->no_bwscale);

	/* BW scale CB needs to be set after registering MHI per requirement */
	if (!plat_priv->no_bwscale)
		cnss_mhi_controller_set_bw_scale_cb(pci_priv,
						    cnss_mhi_bw_scale);

	ret = cnss_pci_update_fw_name(pci_priv);
	if (ret)
		goto unreg_mhi;

	return 0;

unreg_mhi:
	mhi_unregister_controller(mhi_ctrl);
free_mhi_irq:
	kfree(mhi_ctrl->irq);
free_mhi_ctrl:
	mhi_free_controller(mhi_ctrl);

	return ret;
}

static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
{
	struct mhi_controller *mhi_ctrl = pci_priv->mhi_ctrl;

	if (pci_priv->device_id == QCA6174_DEVICE_ID)
		return;

	mhi_unregister_controller(mhi_ctrl);
	kfree(mhi_ctrl->irq);
	mhi_ctrl->irq = NULL;
	mhi_free_controller(mhi_ctrl);
	pci_priv->mhi_ctrl = NULL;
}

static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
{
	switch (pci_priv->device_id) {
	case QCA6390_DEVICE_ID:
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6390;
		pci_priv->wcss_reg = wcss_reg_access_seq;
		pci_priv->pcie_reg = pcie_reg_access_seq;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		pci_priv->syspm_reg = syspm_reg_access_seq;

		/* Configure the WDOG register with a specific value so that,
		 * when reading registers back, we can tell whether the HW is
		 * in the middle of WDOG reset recovery.
		 */
		cnss_pci_reg_write
			(pci_priv,
			 QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
			 QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
		break;
	case QCA6490_DEVICE_ID:
		pci_priv->misc_reg_dev_mask = REG_MASK_QCA6490;
		pci_priv->wlaon_reg = wlaon_reg_access_seq;
		break;
	default:
		return;
	}
}

#if !IS_ENABLED(CONFIG_ARCH_QCOM)
static int cnss_pci_of_reserved_mem_device_init(struct cnss_pci_data *pci_priv)
{
	return 0;
}

static irqreturn_t cnss_pci_wake_handler(int irq, void *data)
{
	struct cnss_pci_data *pci_priv = data;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	enum rpm_status status;
	struct device *dev;

	pci_priv->wake_counter++;
	cnss_pr_dbg("WLAN PCI wake IRQ (%u) is asserted #%u\n",
		    pci_priv->wake_irq, pci_priv->wake_counter);

	/* Make sure any in-progress system suspend is aborted */
	cnss_pm_stay_awake(plat_priv);
	cnss_pm_relax(plat_priv);
	/* The two pm* API calls above abort system suspend only when the
	 * wakeup source of plat_dev->dev has been initialized via
	 * device_init_wakeup(); calling pm_system_wakeup() guarantees that
	 * system suspend can be aborted even when it has not.
	 */
	pm_system_wakeup();

	dev = &pci_priv->pci_dev->dev;
	status = dev->power.runtime_status;

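	/* Resume the bus if the wake interrupt is being monitored while the
	 * device is auto-suspended, or if runtime PM is suspending or has
	 * already suspended the device.
	 */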
	if ((cnss_pci_get_monitor_wake_intr(pci_priv) &&
	     cnss_pci_get_auto_suspended(pci_priv)) ||
	    (status == RPM_SUSPENDING || status == RPM_SUSPENDED)) {
		cnss_pci_set_monitor_wake_intr(pci_priv, false);
		cnss_pci_pm_request_resume(pci_priv);
	}

	return IRQ_HANDLED;
}

/**
 * cnss_pci_wake_gpio_init() - Setup PCI wake GPIO for WLAN
 * @pci_priv: driver PCI bus context pointer
 *
 * This function initializes the WLAN PCI wake GPIO and its corresponding
 * interrupt. It should be used on non-MSM platforms whose PCIe
 * root complex driver doesn't handle the GPIO.
 *
 * Return: 0 for success or skip, negative value for error
 */
static int cnss_pci_wake_gpio_init(struct cnss_pci_data *pci_priv)
{
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
	struct device *dev = &plat_priv->plat_dev->dev;
	int ret = 0;

	pci_priv->wake_gpio = of_get_named_gpio(dev->of_node,
						"wlan-pci-wake-gpio", 0);
	if (pci_priv->wake_gpio < 0)
		goto out;

	cnss_pr_dbg("Get PCI wake GPIO (%d) from device node\n",
		    pci_priv->wake_gpio);

	ret = gpio_request(pci_priv->wake_gpio, "wlan_pci_wake_gpio");
	if (ret) {
		cnss_pr_err("Failed to request PCI wake GPIO, err = %d\n",
			    ret);
		goto out;
	}

	gpio_direction_input(pci_priv->wake_gpio);
	pci_priv->wake_irq = gpio_to_irq(pci_priv->wake_gpio);

	ret = request_irq(pci_priv->wake_irq, cnss_pci_wake_handler,
			  IRQF_TRIGGER_FALLING, "wlan_pci_wake_irq", pci_priv);
	if (ret) {
		cnss_pr_err("Failed to request PCI wake IRQ, err = %d\n", ret);
		goto free_gpio;
	}

	ret = enable_irq_wake(pci_priv->wake_irq);
	if (ret) {
		cnss_pr_err("Failed to enable PCI wake IRQ, err = %d\n", ret);
		goto free_irq;
	}

	return 0;

free_irq:
	free_irq(pci_priv->wake_irq, pci_priv);
free_gpio:
	gpio_free(pci_priv->wake_gpio);
out:
	return ret;
}
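
/*
 * Illustrative devicetree entry consumed by cnss_pci_wake_gpio_init();
 * the GPIO controller phandle, pin number, and flags below are
 * platform-specific placeholders, not taken from any shipping DT:
 *
 *	wlan-pci-wake-gpio = <&tlmm 16 GPIO_ACTIVE_LOW>;
 */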

static void cnss_pci_wake_gpio_deinit(struct cnss_pci_data *pci_priv)
{
	if (pci_priv->wake_gpio < 0)
		return;

	disable_irq_wake(pci_priv->wake_irq);
	free_irq(pci_priv->wake_irq, pci_priv);
	gpio_free(pci_priv->wake_gpio);
}
#endif

#ifdef CONFIG_CNSS_SUPPORT_DUAL_DEV
static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
{
	int ret = 0;

	/* In the dual WLAN card case, calling pci_register_driver() right
	 * after the first PCIe device finishes enumeration causes
	 * cnss_pci_probe() for the second WLAN card to run too early,
	 * giving this sequence:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices -> cnss_pci_probe
	 * -> exit msm_pcie_enumerate.
	 * The correct sequence we expect is:
	 * enter msm_pcie_enumerate -> pci_bus_add_devices ->
	 * exit msm_pcie_enumerate -> cnss_pci_probe.
	 * The unexpected sequence makes the second WLAN card suspend its
	 * PCIe link while enumeration is still in progress, so the logic
	 * below avoids suspending the PCIe link until enumeration has
	 * finished.
	 */
	plat_priv->enumerate_done = true;

	/* Now that enumeration is finished, try to suspend the PCIe link */
	if (plat_priv->bus_priv) {
		struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
		struct pci_dev *pci_dev = pci_priv->pci_dev;

		switch (pci_dev->device) {
		case QCA6390_DEVICE_ID:
			cnss_pci_set_wlaon_pwr_ctrl(pci_priv,
						    false,
						    true,
						    false);

			cnss_pci_suspend_pwroff(pci_dev);
			break;
		default:
			cnss_pr_err("Unknown PCI device found: 0x%x\n",
				    pci_dev->device);
			ret = -ENODEV;
		}
	}

	return ret;
}
#else
static int cnss_try_suspend(struct cnss_plat_data *plat_priv)
{
	return 0;
}
#endif

/* Installing cnss_pm_domain lets the PM framework use these ops instead of
 * dev->bus->pm, which is pci_dev_pm_ops from pci-driver.c. These ops
 * therefore have to take care of everything the device driver needs, which
 * is currently done from pci_dev_pm_ops.
 */
static struct dev_pm_domain cnss_pm_domain = {
	.ops = {
		SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
					      cnss_pci_resume_noirq)
		SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend,
				   cnss_pci_runtime_resume,
				   cnss_pci_runtime_idle)
	}
};

static int cnss_pci_get_dev_cfg_node(struct cnss_plat_data *plat_priv)
{
	struct device_node *child;
	u32 id, i;
	int id_n, ret;

	if (plat_priv->dt_type != CNSS_DTT_MULTIEXCHG)
		return 0;

	if (!plat_priv->device_id) {
		cnss_pr_err("Invalid device id\n");
		return -EINVAL;
	}

	for_each_available_child_of_node(plat_priv->plat_dev->dev.of_node,
					 child) {
		if (strcmp(child->name, "chip_cfg"))
			continue;

		id_n = of_property_count_u32_elems(child, "supported-ids");
		if (id_n <= 0) {
			cnss_pr_err("Device id is NOT set\n");
			return -EINVAL;
		}

		for (i = 0; i < id_n; i++) {
			ret = of_property_read_u32_index(child,
							 "supported-ids",
							 i, &id);
			if (ret) {
				cnss_pr_err("Failed to read supported ids\n");
				return -EINVAL;
			}

			if (id == plat_priv->device_id) {
				plat_priv->dev_node = child;
				cnss_pr_dbg("got node[%s@%d] for device[0x%x]\n",
					    child->name, i, id);
				return 0;
			}
		}
	}

	return -EINVAL;
}
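
/*
 * Illustrative multi-chip devicetree layout matched by
 * cnss_pci_get_dev_cfg_node(); the node unit addresses and device IDs
 * below are placeholders, not taken from any shipping DT. Each chip_cfg
 * child lists the PCI device IDs it applies to via "supported-ids":
 *
 *	chip_cfg@0 {
 *		supported-ids = <0x1101>;
 *		...
 *	};
 *	chip_cfg@1 {
 *		supported-ids = <0x1103 0x1107>;
 *		...
 *	};
 */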

#ifdef CONFIG_CNSS2_CONDITIONAL_POWEROFF
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
{
	bool suspend_pwroff;

	switch (pci_dev->device) {
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
		suspend_pwroff = false;
		break;
	default:
		suspend_pwroff = true;
	}

	return suspend_pwroff;
}
#else
static bool cnss_should_suspend_pwroff(struct pci_dev *pci_dev)
{
	return true;
}
#endif

#ifdef CONFIG_CNSS2_ENUM_WITH_LOW_SPEED
static void
cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
{
	int ret;

	ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
					  PCI_EXP_LNKSTA_CLS_2_5GB);
	if (ret)
		cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen1, err = %d\n",
			    rc_num, ret);
}

static void
cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
{
	int ret;
	u16 link_speed;
	struct cnss_plat_data *plat_priv = pci_priv->plat_priv;

	switch (pci_priv->device_id) {
	case QCN7605_DEVICE_ID:
		/* Do nothing, keep Gen1 */
		return;
	case QCA6490_DEVICE_ID:
		/* Restore to Gen2 */
		link_speed = PCI_EXP_LNKSTA_CLS_5_0GB;
		break;
	default:
		/* Requesting 0 resets the maximum link speed to the default */
		link_speed = 0;
		break;
	}

	ret = cnss_pci_set_max_link_speed(pci_priv, plat_priv->rc_num, link_speed);
	if (ret)
		cnss_pr_err("Failed to set max PCIe RC%x link speed to %d, err = %d\n",
			    plat_priv->rc_num, link_speed, ret);
}

static void
cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
{
	int ret;

	/* A suspend/resume cycle triggers retraining to re-establish the
	 * link speed.
	 */
	ret = cnss_suspend_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);

	ret = cnss_resume_pci_link(pci_priv);
	if (ret)
		cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);

	cnss_pci_get_link_status(pci_priv);
}
#else
static void
cnss_pci_downgrade_rc_speed(struct cnss_plat_data *plat_priv, u32 rc_num)
{
}

static void
cnss_pci_restore_rc_speed(struct cnss_pci_data *pci_priv)
{
}

static void
cnss_pci_link_retrain_trigger(struct cnss_pci_data *pci_priv)
{
}
#endif

static void cnss_pci_suspend_pwroff(struct pci_dev *pci_dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	int rc_num = pci_dev->bus->domain_nr;
	struct cnss_plat_data *plat_priv;
	int ret = 0;
	bool suspend_pwroff = cnss_should_suspend_pwroff(pci_dev);

	plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);

	if (suspend_pwroff) {
		ret = cnss_suspend_pci_link(pci_priv);
		if (ret)
			cnss_pr_err("Failed to suspend PCI link, err = %d\n",
				    ret);
		cnss_power_off_device(plat_priv);
	} else {
		cnss_pr_dbg("bus suspend and dev power off disabled for device [0x%x]\n",
			    pci_dev->device);
		cnss_pci_link_retrain_trigger(pci_priv);
	}
}

static int cnss_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	int ret = 0;
	struct cnss_pci_data *pci_priv;
	struct device *dev = &pci_dev->dev;
	int rc_num = pci_dev->bus->domain_nr;
	struct cnss_plat_data *plat_priv = cnss_get_plat_priv_by_rc_num(rc_num);

	cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x rc_num %d\n",
		    id->vendor, pci_dev->device, rc_num);
	if (!plat_priv) {
		cnss_pr_err("Failed to find matching plat_priv for RC number\n");
		ret = -ENODEV;
		goto out;
	}

	pci_priv = devm_kzalloc(dev, sizeof(*pci_priv), GFP_KERNEL);
	if (!pci_priv) {
		ret = -ENOMEM;
		goto out;
	}

	pci_priv->pci_link_state = PCI_LINK_UP;
	pci_priv->plat_priv = plat_priv;
	pci_priv->pci_dev = pci_dev;
	pci_priv->pci_device_id = id;
	pci_priv->device_id = pci_dev->device;
	cnss_set_pci_priv(pci_dev, pci_priv);
	plat_priv->device_id = pci_dev->device;
	plat_priv->bus_priv = pci_priv;
	mutex_init(&pci_priv->bus_lock);
	if (plat_priv->use_pm_domain)
		dev->pm_domain = &cnss_pm_domain;

	cnss_pci_restore_rc_speed(pci_priv);

	ret = cnss_pci_get_dev_cfg_node(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to get device cfg node, err = %d\n", ret);
		goto reset_ctx;
	}

	cnss_get_sleep_clk_supported(plat_priv);

	ret = cnss_dev_specific_power_on(plat_priv);
	if (ret < 0)
		goto reset_ctx;

	cnss_pci_of_reserved_mem_device_init(pci_priv);

	ret = cnss_register_subsys(plat_priv);
	if (ret)
		goto reset_ctx;

	ret = cnss_register_ramdump(plat_priv);
	if (ret)
		goto unregister_subsys;

	ret = cnss_pci_init_smmu(pci_priv);
	if (ret)
		goto unregister_ramdump;

	/* Update DRV support flag */
	cnss_pci_update_drv_supported(pci_priv);

	cnss_update_supported_link_info(pci_priv);

	init_completion(&pci_priv->wake_event_complete);

	ret = cnss_reg_pci_event(pci_priv);
	if (ret) {
		cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
		goto deinit_smmu;
	}

	ret = cnss_pci_enable_bus(pci_priv);
	if (ret)
		goto dereg_pci_event;

	ret = cnss_pci_enable_msi(pci_priv);
	if (ret)
		goto disable_bus;

	ret = cnss_pci_register_mhi(pci_priv);
	if (ret)
		goto disable_msi;

	switch (pci_dev->device) {
	case QCA6174_DEVICE_ID:
		pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
				     &pci_priv->revision_id);
		break;
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		if ((cnss_is_dual_wlan_enabled() &&
		     plat_priv->enumerate_done) || !cnss_is_dual_wlan_enabled())
			cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, false,
						    false);

		timer_setup(&pci_priv->dev_rddm_timer,
			    cnss_dev_rddm_timeout_hdlr, 0);
		timer_setup(&pci_priv->boot_debug_timer,
			    cnss_boot_debug_timeout_hdlr, 0);
		INIT_DELAYED_WORK(&pci_priv->time_sync_work,
				  cnss_pci_time_sync_work_hdlr);
		cnss_pci_get_link_status(pci_priv);
		cnss_pci_set_wlaon_pwr_ctrl(pci_priv, false, true, false);
		cnss_pci_wake_gpio_init(pci_priv);
		break;
	default:
		cnss_pr_err("Unknown PCI device found: 0x%x\n",
			    pci_dev->device);
		ret = -ENODEV;
		goto unreg_mhi;
	}

	cnss_pci_config_regs(pci_priv);
	if (EMULATION_HW)
		goto out;
	if (cnss_is_dual_wlan_enabled() && !plat_priv->enumerate_done)
		goto probe_done;
	cnss_pci_suspend_pwroff(pci_dev);

probe_done:
	set_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);

	return 0;

unreg_mhi:
	cnss_pci_unregister_mhi(pci_priv);
disable_msi:
	cnss_pci_disable_msi(pci_priv);
disable_bus:
	cnss_pci_disable_bus(pci_priv);
dereg_pci_event:
	cnss_dereg_pci_event(pci_priv);
deinit_smmu:
	cnss_pci_deinit_smmu(pci_priv);
unregister_ramdump:
	cnss_unregister_ramdump(plat_priv);
unregister_subsys:
	cnss_unregister_subsys(plat_priv);
reset_ctx:
	plat_priv->bus_priv = NULL;
out:
	return ret;
}

static void cnss_pci_remove(struct pci_dev *pci_dev)
{
	struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
	struct cnss_plat_data *plat_priv =
		cnss_bus_dev_to_plat_priv(&pci_dev->dev);

	clear_bit(CNSS_PCI_PROBE_DONE, &plat_priv->driver_state);
	cnss_pci_unregister_driver_hdlr(pci_priv);
	cnss_pci_free_aux_mem(pci_priv);
	cnss_pci_free_tme_lite_mem(pci_priv);
	cnss_pci_free_tme_opt_file_mem(pci_priv);
	cnss_pci_free_m3_mem(pci_priv);
	cnss_pci_free_fw_mem(pci_priv);
	cnss_pci_free_qdss_mem(pci_priv);

	switch (pci_dev->device) {
	case QCA6290_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case KIWI_DEVICE_ID:
	case MANGO_DEVICE_ID:
	case PEACH_DEVICE_ID:
		cnss_pci_wake_gpio_deinit(pci_priv);
		del_timer(&pci_priv->boot_debug_timer);
		del_timer(&pci_priv->dev_rddm_timer);
		break;
	default:
		break;
	}

	cnss_pci_unregister_mhi(pci_priv);
	cnss_pci_disable_msi(pci_priv);
	cnss_pci_disable_bus(pci_priv);
	cnss_dereg_pci_event(pci_priv);
	cnss_pci_deinit_smmu(pci_priv);
	if (plat_priv) {
		cnss_unregister_ramdump(plat_priv);
		cnss_unregister_subsys(plat_priv);
		plat_priv->bus_priv = NULL;
	} else {
		cnss_pr_err("plat_priv is NULL, unable to unregister ramdump and subsys\n");
	}
}

static const struct pci_device_id cnss_pci_id_table[] = {
	{ QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6390_VENDOR_ID, QCA6390_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCN7605_VENDOR_ID, QCN7605_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ QCA6490_VENDOR_ID, QCA6490_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ KIWI_VENDOR_ID, KIWI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ MANGO_VENDOR_ID, MANGO_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ PEACH_VENDOR_ID, PEACH_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);

static const struct dev_pm_ops cnss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
				      cnss_pci_resume_noirq)
	SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
			   cnss_pci_runtime_idle)
};

static struct pci_driver cnss_pci_driver = {
	.name = "cnss_pci",
	.id_table = cnss_pci_id_table,
	.probe = cnss_pci_probe,
	.remove = cnss_pci_remove,
	.driver = {
		.pm = &cnss_pm_ops,
	},
};

static int cnss_pci_enumerate(struct cnss_plat_data *plat_priv, u32 rc_num)
{
	int ret, retry = 0;

	/* Always set the initial target PCIe link speed to Gen2 for the
	 * QCA6490 device, since there may be link issues if it boots up with
	 * Gen3 link speed. The device can change it later at any time; a
	 * requested speed higher than the one specified in the PCIe DT will
	 * be rejected.
	 */
	if (plat_priv->device_id == QCA6490_DEVICE_ID) {
		ret = cnss_pci_set_max_link_speed(plat_priv->bus_priv, rc_num,
						  PCI_EXP_LNKSTA_CLS_5_0GB);
		if (ret && ret != -EPROBE_DEFER)
			cnss_pr_err("Failed to set max PCIe RC%x link speed to Gen2, err = %d\n",
				    rc_num, ret);
	} else {
		cnss_pci_downgrade_rc_speed(plat_priv, rc_num);
	}

	cnss_pr_dbg("Trying to enumerate with PCIe RC%x\n", rc_num);
retry:
	ret = _cnss_pci_enumerate(plat_priv, rc_num);
	if (ret) {
		if (ret == -EPROBE_DEFER) {
			cnss_pr_dbg("PCIe RC driver is not ready, defer probe\n");
			goto out;
		}
		cnss_pr_err("Failed to enable PCIe RC%x, err = %d\n",
			    rc_num, ret);
		if (retry++ < LINK_TRAINING_RETRY_MAX_TIMES) {
			cnss_pr_dbg("Retry PCI link training #%d\n", retry);
			goto retry;
		} else {
			goto out;
		}
	}

	plat_priv->rc_num = rc_num;

out:
	return ret;
}

int cnss_pci_init(struct cnss_plat_data *plat_priv)
{
	struct device *dev = &plat_priv->plat_dev->dev;
	const __be32 *prop;
	int ret = 0, prop_len = 0, rc_count, i;

	prop = of_get_property(dev->of_node, "qcom,wlan-rc-num", &prop_len);
	if (!prop || !prop_len) {
		cnss_pr_err("Failed to get PCIe RC number from DT\n");
		goto out;
	}

	rc_count = prop_len / sizeof(__be32);
	for (i = 0; i < rc_count; i++) {
		ret = cnss_pci_enumerate(plat_priv, be32_to_cpup(&prop[i]));
		if (!ret)
			break;
		else if (ret == -EPROBE_DEFER || (ret && i == rc_count - 1))
			goto out;
	}

	ret = cnss_try_suspend(plat_priv);
	if (ret) {
		cnss_pr_err("Failed to suspend, ret: %d\n", ret);
		goto out;
	}

	if (!cnss_driver_registered) {
		ret = pci_register_driver(&cnss_pci_driver);
		if (ret) {
			cnss_pr_err("Failed to register to PCI framework, err = %d\n",
				    ret);
			goto out;
		}
		if (!plat_priv->bus_priv) {
			cnss_pr_err("Failed to probe PCI driver\n");
			ret = -ENODEV;
			goto unreg_pci;
		}
		cnss_driver_registered = true;
	}

	return 0;

unreg_pci:
	pci_unregister_driver(&cnss_pci_driver);
out:
	return ret;
}
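
/*
 * Illustrative platform devicetree property parsed by cnss_pci_init();
 * each cell names a PCIe root complex to try enumerating, in order. The
 * values below are placeholders:
 *
 *	qcom,wlan-rc-num = <0x0 0x1>;
 */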

void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
{
	if (cnss_driver_registered) {
		pci_unregister_driver(&cnss_pci_driver);
		cnss_driver_registered = false;
	}
}