xref: /wlan-driver/qca-wifi-host-cmn/hif/src/pcie/if_pci.c (revision 5113495b16420b49004c444715d2daae2066e7dc)
1 /*
2  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for
6  * any purpose with or without fee is hereby granted, provided that the
7  * above copyright notice and this permission notice appear in all
8  * copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17  * PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include <linux/pci.h>
21 #include <linux/slab.h>
22 #include <linux/interrupt.h>
23 #include <linux/if_arp.h>
24 #include <linux/of_pci.h>
25 #include <linux/version.h>
26 #include "hif_io32.h"
27 #include "if_pci.h"
28 #include "hif.h"
29 #include "target_type.h"
30 #include "hif_main.h"
31 #include "ce_main.h"
32 #include "ce_api.h"
33 #include "ce_internal.h"
34 #include "ce_reg.h"
35 #include "ce_bmi.h"
36 #include "regtable.h"
37 #include "hif_hw_version.h"
38 #include <linux/debugfs.h>
39 #include <linux/seq_file.h>
40 #include "qdf_status.h"
41 #include "qdf_atomic.h"
42 #include "qdf_platform.h"
43 #include "pld_common.h"
44 #include "mp_dev.h"
45 #include "hif_debug.h"
46 
47 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
48 char *legacy_ic_irqname[] = {
49 "ce0",
50 "ce1",
51 "ce2",
52 "ce3",
53 "ce4",
54 "ce5",
55 "ce6",
56 "ce7",
57 "ce8",
58 "ce9",
59 "ce10",
60 "ce11",
61 "ce12",
62 "ce13",
63 "ce14",
64 "ce15",
65 "reo2sw8_intr2",
66 "reo2sw7_intr2",
67 "reo2sw6_intr2",
68 "reo2sw5_intr2",
69 "reo2sw4_intr2",
70 "reo2sw3_intr2",
71 "reo2sw2_intr2",
72 "reo2sw1_intr2",
73 "reo2sw0_intr2",
74 "reo2sw8_intr",
75 "reo2sw7_intr",
76 "reo2sw6_inrr",
77 "reo2sw5_intr",
78 "reo2sw4_intr",
79 "reo2sw3_intr",
80 "reo2sw2_intr",
81 "reo2sw1_intr",
82 "reo2sw0_intr",
83 "reo2status_intr2",
84 "reo_status",
85 "reo2rxdma_out_2",
86 "reo2rxdma_out_1",
87 "reo_cmd",
88 "sw2reo6",
89 "sw2reo5",
90 "sw2reo1",
91 "sw2reo",
92 "rxdma2reo_mlo_0_dst_ring1",
93 "rxdma2reo_mlo_0_dst_ring0",
94 "rxdma2reo_mlo_1_dst_ring1",
95 "rxdma2reo_mlo_1_dst_ring0",
96 "rxdma2reo_dst_ring1",
97 "rxdma2reo_dst_ring0",
98 "rxdma2sw_dst_ring1",
99 "rxdma2sw_dst_ring0",
100 "rxdma2release_dst_ring1",
101 "rxdma2release_dst_ring0",
102 "sw2rxdma_2_src_ring",
103 "sw2rxdma_1_src_ring",
104 "sw2rxdma_0",
105 "wbm2sw6_release2",
106 "wbm2sw5_release2",
107 "wbm2sw4_release2",
108 "wbm2sw3_release2",
109 "wbm2sw2_release2",
110 "wbm2sw1_release2",
111 "wbm2sw0_release2",
112 "wbm2sw6_release",
113 "wbm2sw5_release",
114 "wbm2sw4_release",
115 "wbm2sw3_release",
116 "wbm2sw2_release",
117 "wbm2sw1_release",
118 "wbm2sw0_release",
119 "wbm2sw_link",
120 "wbm_error_release",
121 "sw2txmon_src_ring",
122 "sw2rxmon_src_ring",
123 "txmon2sw_p1_intr1",
124 "txmon2sw_p1_intr0",
125 "txmon2sw_p0_dest1",
126 "txmon2sw_p0_dest0",
127 "rxmon2sw_p1_intr1",
128 "rxmon2sw_p1_intr0",
129 "rxmon2sw_p0_dest1",
130 "rxmon2sw_p0_dest0",
131 "sw_release",
132 "sw2tcl_credit2",
133 "sw2tcl_credit",
134 "sw2tcl4",
135 "sw2tcl5",
136 "sw2tcl3",
137 "sw2tcl2",
138 "sw2tcl1",
139 "sw2wbm1",
140 "misc_8",
141 "misc_7",
142 "misc_6",
143 "misc_5",
144 "misc_4",
145 "misc_3",
146 "misc_2",
147 "misc_1",
148 "misc_0",
149 };
150 #endif
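
/*
 * The legacy_ic_irqname[] table above is indexed by wired (non-MSI)
 * interrupt line number.  A minimal sketch of the assumed usage (the
 * exact registration call lives elsewhere in the driver): the selected
 * name is handed to the kernel when the line is requested, which is what
 * labels these interrupts in /proc/interrupts, e.g.:
 *
 *	pfrm_request_irq(dev, irq, handler, IRQF_SHARED,
 *			 legacy_ic_irqname[line], ctx);
 */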
151 
152 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
153 	defined(QCA_WIFI_KIWI))
154 #include "hal_api.h"
155 #endif
156 
157 #include "if_pci_internal.h"
158 #include "ce_tasklet.h"
159 #include "targaddrs.h"
160 #include "hif_exec.h"
161 
162 #include "pci_api.h"
163 #include "ahb_api.h"
164 #include "wlan_cfg.h"
165 #include "qdf_hang_event_notifier.h"
166 #include "qdf_platform.h"
167 #include "qal_devnode.h"
168 #include "qdf_irq.h"
169 
170 /* Maximum ms timeout for host to wake up target */
171 #define PCIE_WAKE_TIMEOUT 1000
172 #define RAMDUMP_EVENT_TIMEOUT 2500
173 
174 /* Setting SOC_GLOBAL_RESET during driver unload causes an intermittent
175  * PCIe data bus error. As a workaround for this issue, the reset
176  * sequence is changed to use a Target CPU warm reset instead of
177  * SOC_GLOBAL_RESET.
178  */
179 #define CPU_WARM_RESET_WAR
180 #define WLAN_CFG_MAX_PCIE_GROUPS 5
181 #ifdef QCA_WIFI_QCN9224
182 #define WLAN_CFG_MAX_CE_COUNT 16
183 #else
184 #define WLAN_CFG_MAX_CE_COUNT 12
185 #endif
186 #define DP_IRQ_NAME_LEN 25
187 char dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS][DP_IRQ_NAME_LEN] = {};
188 char ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT][DP_IRQ_NAME_LEN] = {};
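
/*
 * A minimal sketch of how these per-slot name tables are assumed to be
 * filled at IRQ-registration time (the format string and helper below are
 * illustrative, not the exact ones used later in this file):
 *
 *	int slot = hif_get_pci_slot(scn);
 *
 *	snprintf(ce_irqname[slot][ce_id], DP_IRQ_NAME_LEN,
 *		 "pci%d_wlan_ce_%d", slot, ce_id);
 */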
189 
190 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
191 #define WLAN_CFG_MAX_LEGACY_IRQ_COUNT 160
192 char dp_legacy_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_LEGACY_IRQ_COUNT][DP_IRQ_NAME_LEN] = {};
193 #endif
194 
195 static inline int hif_get_pci_slot(struct hif_softc *scn)
196 {
197 	int pci_slot = pld_get_pci_slot(scn->qdf_dev->dev);
198 
199 	if (pci_slot < 0) {
200 		hif_err("Invalid PCI SLOT %d", pci_slot);
201 		qdf_assert_always(0);
202 		return 0;
203 	} else {
204 		return pci_slot;
205 	}
206 }
207 
208 /*
209  * Top-level interrupt handler for all PCI interrupts from a Target.
210  * When a block of MSI interrupts is allocated, this top-level handler
211  * is not used; instead, we directly call the correct sub-handler.
212  */
213 struct ce_irq_reg_table {
214 	uint32_t irq_enable;
215 	uint32_t irq_status;
216 };
217 
218 #ifndef QCA_WIFI_3_0_ADRASTEA
219 static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
220 {
221 }
222 #else
223 static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
224 {
225 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
226 	unsigned int target_enable0, target_enable1;
227 	unsigned int target_cause0, target_cause1;
228 
229 	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
230 	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
231 	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
232 	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
233 
234 	if ((target_enable0 & target_cause0) ||
235 	    (target_enable1 & target_cause1)) {
236 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
237 		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
238 
239 		if (scn->notice_send)
240 			pld_intr_notify_q6(sc->dev);
241 	}
242 }
243 #endif
244 
245 
246 /**
247  * pci_dispatch_interrupt() - PCI interrupt dispatcher
248  * @scn: scn
249  *
250  * Return: N/A
251  */
252 static void pci_dispatch_interrupt(struct hif_softc *scn)
253 {
254 	uint32_t intr_summary;
255 	int id;
256 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
257 
258 	if (scn->hif_init_done != true)
259 		return;
260 
261 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
262 		return;
263 
264 	intr_summary = CE_INTERRUPT_SUMMARY(scn);
265 
266 	if (intr_summary == 0) {
267 		if ((scn->target_status != TARGET_STATUS_RESET) &&
268 			(!qdf_atomic_read(&scn->link_suspended))) {
269 
270 			hif_write32_mb(scn, scn->mem +
271 				(SOC_CORE_BASE_ADDRESS |
272 				PCIE_INTR_ENABLE_ADDRESS),
273 				HOST_GROUP0_MASK);
274 
275 			hif_read32_mb(scn, scn->mem +
276 					(SOC_CORE_BASE_ADDRESS |
277 					PCIE_INTR_ENABLE_ADDRESS));
278 		}
279 		Q_TARGET_ACCESS_END(scn);
280 		return;
281 	}
282 	Q_TARGET_ACCESS_END(scn);
283 
284 	scn->ce_irq_summary = intr_summary;
285 	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
286 		if (intr_summary & (1 << id)) {
287 			intr_summary &= ~(1 << id);
288 			ce_dispatch_interrupt(id,  &hif_state->tasklets[id]);
289 		}
290 	}
291 }
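
/*
 * Worked example for the dispatch loop above: if CE_INTERRUPT_SUMMARY()
 * returns 0x0a (bits 1 and 3 set), the loop calls
 * ce_dispatch_interrupt(1, ...) and then ce_dispatch_interrupt(3, ...),
 * clearing each bit as it is serviced and exiting early once
 * intr_summary reaches zero.
 */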
292 
293 irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
294 {
295 	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
296 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
297 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
298 
299 	volatile int tmp;
300 	uint16_t val = 0;
301 	uint32_t bar0 = 0;
302 	uint32_t fw_indicator_address, fw_indicator;
303 	bool ssr_irq = false;
304 	unsigned int host_cause, host_enable;
305 
306 	if (LEGACY_INTERRUPTS(sc)) {
307 		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
308 			return IRQ_HANDLED;
309 
310 		if (ADRASTEA_BU) {
311 			host_enable = hif_read32_mb(sc, sc->mem +
312 						    PCIE_INTR_ENABLE_ADDRESS);
313 			host_cause = hif_read32_mb(sc, sc->mem +
314 						   PCIE_INTR_CAUSE_ADDRESS);
315 			if (!(host_enable & host_cause)) {
316 				hif_pci_route_adrastea_interrupt(sc);
317 				return IRQ_HANDLED;
318 			}
319 		}
320 
321 		/* Clear Legacy PCI line interrupts
322 		 * IMPORTANT: the INTR_CLR register has to be set
323 		 * after INTR_ENABLE is set to 0; otherwise the
324 		 * interrupt cannot actually be cleared
325 		 */
326 		hif_write32_mb(sc, sc->mem +
327 			      (SOC_CORE_BASE_ADDRESS |
328 			       PCIE_INTR_ENABLE_ADDRESS), 0);
329 
330 		hif_write32_mb(sc, sc->mem +
331 			      (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
332 			       ADRASTEA_BU ?
333 			       (host_enable & host_cause) :
334 			      HOST_GROUP0_MASK);
335 
336 		if (ADRASTEA_BU)
337 			hif_write32_mb(sc, sc->mem + 0x2f100c,
338 				       (host_cause >> 1));
339 
340 		/* IMPORTANT: this extra read transaction is required to
341 		 * flush the posted write buffer
342 		 */
343 		if (!ADRASTEA_BU) {
344 			tmp =
345 				hif_read32_mb(sc, sc->mem +
346 					     (SOC_CORE_BASE_ADDRESS |
347 					      PCIE_INTR_ENABLE_ADDRESS));
348 
349 			if (tmp == 0xdeadbeef) {
350 				hif_err("SoC returns 0xdeadbeef!!");
351 
352 				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
353 				hif_err("PCI Vendor ID = 0x%04x", val);
354 
355 				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
356 				hif_err("PCI Device ID = 0x%04x", val);
357 
358 				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
359 				hif_err("PCI Command = 0x%04x", val);
360 
361 				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
362 				hif_err("PCI Status = 0x%04x", val);
363 
364 				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
365 						      &bar0);
366 				hif_err("PCI BAR0 = 0x%08x", bar0);
367 
368 				hif_err("RTC_STATE_ADDRESS = 0x%08x",
369 					hif_read32_mb(sc, sc->mem +
370 						PCIE_LOCAL_BASE_ADDRESS
371 						+ RTC_STATE_ADDRESS));
372 				hif_err("PCIE_SOC_WAKE_ADDRESS = 0x%08x",
373 					hif_read32_mb(sc, sc->mem +
374 						PCIE_LOCAL_BASE_ADDRESS
375 						+ PCIE_SOC_WAKE_ADDRESS));
376 				hif_err("0x80008 = 0x%08x, 0x8000c = 0x%08x",
377 					hif_read32_mb(sc, sc->mem + 0x80008),
378 					hif_read32_mb(sc, sc->mem + 0x8000c));
379 				hif_err("0x80010 = 0x%08x, 0x80014 = 0x%08x",
380 					hif_read32_mb(sc, sc->mem + 0x80010),
381 					hif_read32_mb(sc, sc->mem + 0x80014));
382 				hif_err("0x80018 = 0x%08x, 0x8001c = 0x%08x",
383 					hif_read32_mb(sc, sc->mem + 0x80018),
384 					hif_read32_mb(sc, sc->mem + 0x8001c));
385 				QDF_BUG(0);
386 			}
387 
388 			PCI_CLR_CAUSE0_REGISTER(sc);
389 		}
390 
391 		if (HAS_FW_INDICATOR) {
392 			fw_indicator_address = hif_state->fw_indicator_address;
393 			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
394 			if ((fw_indicator != ~0) &&
395 			   (fw_indicator & FW_IND_EVENT_PENDING))
396 				ssr_irq = true;
397 		}
398 
399 		if (Q_TARGET_ACCESS_END(scn) < 0)
400 			return IRQ_HANDLED;
401 	}
402 	/* TBDXXX: Add support for WMAC */
403 
404 	if (ssr_irq) {
405 		sc->irq_event = irq;
406 		qdf_atomic_set(&scn->tasklet_from_intr, 1);
407 
408 		qdf_atomic_inc(&scn->active_tasklet_cnt);
409 		tasklet_schedule(&sc->intr_tq);
410 	} else {
411 		pci_dispatch_interrupt(scn);
412 	}
413 
414 	return IRQ_HANDLED;
415 }
416 
417 bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
418 {
419 	return 1;               /* FIX THIS */
420 }
421 
422 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
423 {
424 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
425 	int i = 0;
426 
427 	if (!irq || !size) {
428 		return -EINVAL;
429 	}
430 
431 	if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
432 		irq[0] = sc->irq;
433 		return 1;
434 	}
435 
436 	if (sc->num_msi_intrs > size) {
437 		qdf_print("Not enough space in irq buffer to return irqs");
438 		return -EINVAL;
439 	}
440 
441 	for (i = 0; i < sc->num_msi_intrs; i++) {
442 		irq[i] = sc->irq +  i + MSI_ASSIGN_CE_INITIAL;
443 	}
444 
445 	return sc->num_msi_intrs;
446 }
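
/*
 * Illustrative call pattern for hif_get_irq_num(); the buffer size used
 * here is an arbitrary example.  The return value is the number of IRQ
 * numbers written into the caller-supplied buffer, or a negative errno:
 *
 *	int irqs[WLAN_CFG_MAX_CE_COUNT];
 *	int n = hif_get_irq_num(hif_hdl, irqs, QDF_ARRAY_SIZE(irqs));
 *
 *	if (n < 0)
 *		return n;
 */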
447 
448 
449 /**
450  * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
451  * @scn: hif_softc
452  *
453  * Return: void
454  */
455 #if CONFIG_ATH_PCIE_MAX_PERF == 0
456 void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
457 {
458 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
459 	A_target_id_t pci_addr = scn->mem;
460 
461 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
462 	/*
463 	 * If the deferred sleep timer is running cancel it
464 	 * and put the soc into sleep.
465 	 */
466 	if (hif_state->fake_sleep == true) {
467 		qdf_timer_stop(&hif_state->sleep_timer);
468 		if (hif_state->verified_awake == false) {
469 			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
470 				      PCIE_SOC_WAKE_ADDRESS,
471 				      PCIE_SOC_WAKE_RESET);
472 		}
473 		hif_state->fake_sleep = false;
474 	}
475 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
476 }
477 #else
478 inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
479 {
480 }
481 #endif
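
/*
 * In CONFIG_ATH_PCIE_MAX_PERF builds the stub above is intentionally
 * empty: the target is assumed to be kept awake at all times for
 * performance, so no deferred-sleep timer ever needs cancelling.
 */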
482 
483 #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
484 	hif_read32_mb(sc, (char *)(mem) + \
485 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
486 
487 #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
488 	hif_write32_mb(sc, ((char *)(mem) + \
489 	PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
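
/*
 * Usage example for the two wrappers above (this exact pattern appears in
 * the reset helpers below): every access is offset by
 * PCIE_LOCAL_BASE_ADDRESS, so callers pass only the register offset.
 *
 *	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
 *			       PCIE_SOC_WAKE_V_MASK);
 *	val = A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS);
 */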
490 
491 #ifdef QCA_WIFI_3_0
492 /**
493  * hif_targ_is_awake() - check to see if the target is awake
494  * @hif_ctx: hif context
495  * @mem: mapped target register base (unused)
496  *
497  * emulation never goes to sleep
498  *
499  * Return: true if target is awake
500  */
501 static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
502 {
503 	return true;
504 }
505 #else
506 /**
507  * hif_targ_is_awake() - check to see if the target is awake
508  * @scn: hif context
509  * @mem: mapped target register base
510  *
511  * Return: true if the targets clocks are on
512  */
513 static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
514 {
515 	uint32_t val;
516 
517 	if (scn->recovery)
518 		return false;
519 	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
520 		+ RTC_STATE_ADDRESS);
521 	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
522 }
523 #endif
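
/*
 * Note on the non-emulation variant above: the target is reported awake
 * only when the RTC state field reads exactly RTC_STATE_V_ON (clocks
 * fully on); any transitional state counts as "not awake", so callers
 * such as the reset and wake paths below keep polling.
 */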
524 
525 #define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
526 static void hif_pci_device_reset(struct hif_pci_softc *sc)
527 {
528 	void __iomem *mem = sc->mem;
529 	int i;
530 	uint32_t val;
531 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
532 
533 	if (!scn->hostdef)
534 		return;
535 
536 	/* NB: Don't check resetok here.  This form of reset
537 	 * is integral to correct operation.
538 	 */
539 
540 	if (!SOC_GLOBAL_RESET_ADDRESS)
541 		return;
542 
543 	if (!mem)
544 		return;
545 
546 	hif_err("Reset Device");
547 
548 	/*
549 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
550 	 * writing WAKE_V, the Target may scribble over Host memory!
551 	 */
552 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
553 			       PCIE_SOC_WAKE_V_MASK);
554 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
555 		if (hif_targ_is_awake(scn, mem))
556 			break;
557 
558 		qdf_mdelay(1);
559 	}
560 
561 	/* Put Target, including PCIe, into RESET. */
562 	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
563 	val |= 1;
564 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
565 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
566 		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
567 		    RTC_STATE_COLD_RESET_MASK)
568 			break;
569 
570 		qdf_mdelay(1);
571 	}
572 
573 	/* Pull Target, including PCIe, out of RESET. */
574 	val &= ~1;
575 	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
576 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
577 		if (!
578 		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
579 		     RTC_STATE_COLD_RESET_MASK))
580 			break;
581 
582 		qdf_mdelay(1);
583 	}
584 
585 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
586 			       PCIE_SOC_WAKE_RESET);
587 }
588 
589 /* CPU warm reset function
590  * Steps:
591  * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
592  * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
593  *    correctly on WARM reset
594  * 3. Clear TARGET CPU LF timer interrupt
595  * 4. Reset all CEs to clear any pending CE transactions
596  * 5. Warm reset CPU
597  */
598 static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
599 {
600 	void __iomem *mem = sc->mem;
601 	int i;
602 	uint32_t val;
603 	uint32_t fw_indicator;
604 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
605 
606 	/* NB: Don't check resetok here.  This form of reset is
607 	 * integral to correct operation.
608 	 */
609 
610 	if (!mem)
611 		return;
612 
613 	hif_debug("Target Warm Reset");
614 
615 	/*
616 	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
617 	 * writing WAKE_V, the Target may scribble over Host memory!
618 	 */
619 	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
620 			       PCIE_SOC_WAKE_V_MASK);
621 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
622 		if (hif_targ_is_awake(scn, mem))
623 			break;
624 		qdf_mdelay(1);
625 	}
626 
627 	/*
628 	 * Disable Pending interrupts
629 	 */
630 	val =
631 		hif_read32_mb(sc, mem +
632 			     (SOC_CORE_BASE_ADDRESS |
633 			      PCIE_INTR_CAUSE_ADDRESS));
634 	hif_debug("Host Intr Cause reg 0x%x: value : 0x%x",
635 		  (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
636 	/* Target CPU Intr Cause */
637 	val = hif_read32_mb(sc, mem +
638 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
639 	hif_debug("Target CPU Intr Cause 0x%x", val);
640 
641 	val =
642 		hif_read32_mb(sc, mem +
643 			     (SOC_CORE_BASE_ADDRESS |
644 			      PCIE_INTR_ENABLE_ADDRESS));
645 	hif_write32_mb(sc, (mem +
646 		       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
647 	hif_write32_mb(sc, (mem +
648 		       (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
649 		       HOST_GROUP0_MASK);
650 
651 	qdf_mdelay(100);
652 
653 	/* Clear FW_INDICATOR_ADDRESS */
654 	if (HAS_FW_INDICATOR) {
655 		fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
656 		hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
657 	}
658 
659 	/* Clear Target LF Timer interrupts */
660 	val =
661 		hif_read32_mb(sc, mem +
662 			     (RTC_SOC_BASE_ADDRESS +
663 			      SOC_LF_TIMER_CONTROL0_ADDRESS));
664 	hif_debug("addr 0x%x : 0x%x",
665 	       (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
666 	val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
667 	hif_write32_mb(sc, mem +
668 		      (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
669 		      val);
670 
671 	/* Reset CE */
672 	val =
673 		hif_read32_mb(sc, mem +
674 			     (RTC_SOC_BASE_ADDRESS |
675 			      SOC_RESET_CONTROL_ADDRESS));
676 	val |= SOC_RESET_CONTROL_CE_RST_MASK;
677 	hif_write32_mb(sc, (mem +
678 		       (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
679 		      val);
680 	val =
681 		hif_read32_mb(sc, mem +
682 			     (RTC_SOC_BASE_ADDRESS |
683 			      SOC_RESET_CONTROL_ADDRESS));
684 	qdf_mdelay(10);
685 
686 	/* CE unreset */
687 	val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
688 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
689 		       SOC_RESET_CONTROL_ADDRESS), val);
690 	val =
691 		hif_read32_mb(sc, mem +
692 			     (RTC_SOC_BASE_ADDRESS |
693 			      SOC_RESET_CONTROL_ADDRESS));
694 	qdf_mdelay(10);
695 
696 	/* Read Target CPU Intr Cause */
697 	val = hif_read32_mb(sc, mem +
698 			    (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
699 	hif_debug("Target CPU Intr Cause after CE reset 0x%x", val);
700 
701 	/* CPU warm RESET */
702 	val =
703 		hif_read32_mb(sc, mem +
704 			     (RTC_SOC_BASE_ADDRESS |
705 			      SOC_RESET_CONTROL_ADDRESS));
706 	val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
707 	hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
708 		       SOC_RESET_CONTROL_ADDRESS), val);
709 	val =
710 		hif_read32_mb(sc, mem +
711 			     (RTC_SOC_BASE_ADDRESS |
712 			      SOC_RESET_CONTROL_ADDRESS));
713 	hif_debug("RESET_CONTROL after cpu warm reset 0x%x", val);
714 
715 	qdf_mdelay(100);
716 	hif_debug("Target Warm reset complete");
717 
718 }
719 
720 #ifndef QCA_WIFI_3_0
721 /* only applicable to legacy ce */
722 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
723 {
724 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
725 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
726 	void __iomem *mem = sc->mem;
727 	uint32_t val;
728 
729 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
730 		return ATH_ISR_NOSCHED;
731 	val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
732 	if (Q_TARGET_ACCESS_END(scn) < 0)
733 		return ATH_ISR_SCHED;
734 
735 	hif_debug("FW_INDICATOR register is 0x%x", val);
736 
737 	if (val & FW_IND_HELPER)
738 		return 0;
739 
740 	return 1;
741 }
742 #endif
743 
744 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
745 {
746 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
747 	uint16_t device_id = 0;
748 	uint32_t val;
749 	uint16_t timeout_count = 0;
750 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
751 
752 	/* Check device ID from PCIe configuration space for link status */
753 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
754 	if (device_id != sc->devid) {
755 		hif_err("Device ID does match (read 0x%x, expect 0x%x)",
756 			device_id, sc->devid);
757 		return -EACCES;
758 	}
759 
760 	/* Check PCIe local register for bar/memory access */
761 	val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
762 			   RTC_STATE_ADDRESS);
763 	hif_debug("RTC_STATE_ADDRESS is %08x", val);
764 
765 	/* Try to wake up target if it sleeps */
766 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
767 		PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
768 	hif_debug("PCIE_SOC_WAKE_ADDRESS is %08x",
769 		hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
770 		PCIE_SOC_WAKE_ADDRESS));
771 
772 	/* Check if target can be woken up */
773 	while (!hif_targ_is_awake(scn, sc->mem)) {
774 		if (timeout_count >= PCIE_WAKE_TIMEOUT) {
775 			hif_err("wake up timeout, %08x, %08x",
776 				hif_read32_mb(sc, sc->mem +
777 				     PCIE_LOCAL_BASE_ADDRESS +
778 				     RTC_STATE_ADDRESS),
779 				hif_read32_mb(sc, sc->mem +
780 				     PCIE_LOCAL_BASE_ADDRESS +
781 				     PCIE_SOC_WAKE_ADDRESS));
782 			return -EACCES;
783 		}
784 
785 		hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
786 			      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
787 
788 		qdf_mdelay(100);
789 		timeout_count += 100;
790 	}
791 
792 	/* Check Power register for SoC internal bus issues */
793 	val =
794 		hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
795 			     SOC_POWER_REG_OFFSET);
796 	hif_debug("Power register is %08x", val);
797 
798 	return 0;
799 }
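
/*
 * Note on the wake loop above: each iteration re-asserts SOC_WAKE and
 * then delays 100 ms while advancing timeout_count by 100, so with
 * PCIE_WAKE_TIMEOUT at 1000 ms the target gets roughly ten wake attempts
 * before the function gives up with -EACCES.
 */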
800 
801 /**
802  * __hif_pci_dump_registers(): dump other PCI debug registers
803  * @scn: struct hif_softc
804  *
805  * This function dumps pci debug registers.  The parent function
806  * dumps the copy engine registers before calling this function.
807  *
808  * Return: void
809  */
810 static void __hif_pci_dump_registers(struct hif_softc *scn)
811 {
812 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
813 	void __iomem *mem = sc->mem;
814 	uint32_t val, i, j;
815 	uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
816 	uint32_t ce_base;
817 
818 	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
819 		return;
820 
821 	/* DEBUG_INPUT_SEL_SRC = 0x6 */
822 	val =
823 		hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
824 			     WLAN_DEBUG_INPUT_SEL_OFFSET);
825 	val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
826 	val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
827 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
828 		       WLAN_DEBUG_INPUT_SEL_OFFSET, val);
829 
830 	/* DEBUG_CONTROL_ENABLE = 0x1 */
831 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
832 			   WLAN_DEBUG_CONTROL_OFFSET);
833 	val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
834 	val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
835 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
836 		      WLAN_DEBUG_CONTROL_OFFSET, val);
837 
838 	hif_debug("Debug: inputsel: %x dbgctrl: %x",
839 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
840 			    WLAN_DEBUG_INPUT_SEL_OFFSET),
841 	       hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
842 			    WLAN_DEBUG_CONTROL_OFFSET));
843 
844 	hif_debug("Debug CE");
845 	/* Loop CE debug output */
846 	/* AMBA_DEBUG_BUS_SEL = 0xc */
847 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
848 			    AMBA_DEBUG_BUS_OFFSET);
849 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
850 	val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
851 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
852 		       val);
853 
854 	for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
855 		/* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
856 		val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
857 				   CE_WRAPPER_DEBUG_OFFSET);
858 		val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
859 		val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
860 		hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
861 			      CE_WRAPPER_DEBUG_OFFSET, val);
862 
863 		hif_debug("ce wrapper: %d amdbg: %x cewdbg: %x",
864 			  wrapper_idx[i],
865 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
866 				AMBA_DEBUG_BUS_OFFSET),
867 			  hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
868 				CE_WRAPPER_DEBUG_OFFSET));
869 
870 		if (wrapper_idx[i] <= 7) {
871 			for (j = 0; j <= 5; j++) {
872 				ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
873 				/* For (j=0~5) write CE_DEBUG_SEL = j */
874 				val =
875 					hif_read32_mb(sc, mem + ce_base +
876 						     CE_DEBUG_OFFSET);
877 				val &= ~CE_DEBUG_SEL_MASK;
878 				val |= CE_DEBUG_SEL_SET(j);
879 				hif_write32_mb(sc, mem + ce_base +
880 					       CE_DEBUG_OFFSET, val);
881 
882 				/* read (@gpio_athr_wlan_reg)
883 				 * WLAN_DEBUG_OUT_DATA
884 				 */
885 				val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
886 						    + WLAN_DEBUG_OUT_OFFSET);
887 				val = WLAN_DEBUG_OUT_DATA_GET(val);
888 
889 				hif_debug("module%d: cedbg: %x out: %x",
890 					  j,
891 					  hif_read32_mb(sc, mem + ce_base +
892 						CE_DEBUG_OFFSET), val);
893 			}
894 		} else {
895 			/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
896 			val =
897 				hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
898 					     WLAN_DEBUG_OUT_OFFSET);
899 			val = WLAN_DEBUG_OUT_DATA_GET(val);
900 
901 			hif_debug("out: %x", val);
902 		}
903 	}
904 
905 	hif_debug("Debug PCIe:");
906 	/* Loop PCIe debug output */
907 	/* Write AMBA_DEBUG_BUS_SEL = 0x1c */
908 	val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
909 			    AMBA_DEBUG_BUS_OFFSET);
910 	val &= ~AMBA_DEBUG_BUS_SEL_MASK;
911 	val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
912 	hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
913 		       AMBA_DEBUG_BUS_OFFSET, val);
914 
915 	for (i = 0; i <= 8; i++) {
916 		/* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
917 		val =
918 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
919 				     AMBA_DEBUG_BUS_OFFSET);
920 		val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
921 		val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
922 		hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
923 			       AMBA_DEBUG_BUS_OFFSET, val);
924 
925 		/* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
926 		val =
927 			hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
928 				     WLAN_DEBUG_OUT_OFFSET);
929 		val = WLAN_DEBUG_OUT_DATA_GET(val);
930 
931 		hif_debug("amdbg: %x out: %x %x",
932 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
933 				WLAN_DEBUG_OUT_OFFSET), val,
934 			  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
935 				WLAN_DEBUG_OUT_OFFSET));
936 	}
937 
938 	Q_TARGET_ACCESS_END(scn);
939 }
940 
941 /**
942  * hif_pci_dump_registers(): dump bus debug registers
943  * @hif_ctx: struct hif_softc
944  *
945  * This function dumps hif bus debug registers
946  *
947  * Return: 0 for success or error code
948  */
949 int hif_pci_dump_registers(struct hif_softc *hif_ctx)
950 {
951 	int status;
952 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
953 
954 	status = hif_dump_ce_registers(scn);
955 
956 	if (status)
957 		hif_err("Dump CE Registers Failed");
958 
959 	/* dump non copy engine pci registers */
960 	__hif_pci_dump_registers(scn);
961 
962 	return 0;
963 }
964 
965 #ifdef HIF_CONFIG_SLUB_DEBUG_ON
966 
967 /* worker thread to schedule wlan_tasklet in SLUB debug build */
968 static void reschedule_tasklet_work_handler(void *arg)
969 {
970 	struct hif_pci_softc *sc = arg;
971 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
972 
973 	if (!scn) {
974 		hif_err("hif_softc is NULL");
975 		return;
976 	}
977 
978 	if (scn->hif_init_done == false) {
979 		hif_err("wlan driver is unloaded");
980 		return;
981 	}
982 
983 	tasklet_schedule(&sc->intr_tq);
984 }
985 
986 /**
987  * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
988  * work
989  * @sc: HIF PCI Context
990  *
991  * Return: void
992  */
993 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
994 {
995 	qdf_create_work(0, &sc->reschedule_tasklet_work,
996 				reschedule_tasklet_work_handler, NULL);
997 }
998 #else
999 static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
1000 #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
1001 
1002 void wlan_tasklet(unsigned long data)
1003 {
1004 	struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
1005 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
1006 
1007 	if (scn->hif_init_done == false)
1008 		goto end;
1009 
1010 	if (qdf_atomic_read(&scn->link_suspended))
1011 		goto end;
1012 
1013 	if (!ADRASTEA_BU) {
1014 		hif_fw_interrupt_handler(sc->irq_event, scn);
1015 		if (scn->target_status == TARGET_STATUS_RESET)
1016 			goto end;
1017 	}
1018 
1019 end:
1020 	qdf_atomic_set(&scn->tasklet_from_intr, 0);
1021 	qdf_atomic_dec(&scn->active_tasklet_cnt);
1022 }
1023 
1024 /**
1025  * hif_disable_power_gating() - disable HW power gating
1026  * @hif_ctx: hif context
1027  *
1028  * disables pcie L1 power states
1029  */
1030 static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
1031 {
1032 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1033 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1034 
1035 	if (!scn) {
1036 		hif_err("Could not disable ASPM scn is null");
1037 		return;
1038 	}
1039 
1040 	/* Disable ASPM when pkt log is enabled */
1041 	pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
1042 	pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
1043 }
1044 
1045 /**
1046  * hif_enable_power_gating() - enable HW power gating
1047  * @sc: hif context
1048  *
1049  * enables pcie L1 power states
1050  */
1051 static void hif_enable_power_gating(struct hif_pci_softc *sc)
1052 {
1053 	if (!sc) {
1054 		hif_err("Could not disable ASPM scn is null");
1055 		return;
1056 	}
1057 
1058 	/* Re-enable ASPM after firmware/OTP download is complete */
1059 	pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
1060 }
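
/*
 * The helper pair above saves and restores the 32-bit config word at
 * offset 0x80 (assumed here to cover the PCIe link control bits on these
 * devices): masking off the low byte clears the ASPM enable bits while
 * packet log or firmware/OTP download is active, and writing back the
 * saved lcr_val restores whatever link power states the platform
 * originally negotiated.
 */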
1061 
1062 /**
1063  * hif_pci_enable_power_management() - enable power management
1064  * @hif_sc: hif context
1065  * @is_packet_log_enabled: true if packet log is enabled (keeps ASPM off)
1066  *
1067  * Enables runtime PM, ASPM (via hif_enable_power_gating) and re-enables
1068  * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
1069  *
1070  * note: epping mode does not call this function as it does not
1071  *       care about saving power.
1072  */
1073 void hif_pci_enable_power_management(struct hif_softc *hif_sc,
1074 				 bool is_packet_log_enabled)
1075 {
1076 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
1077 	uint32_t mode;
1078 
1079 	if (!pci_ctx) {
1080 		hif_err("hif_ctx null");
1081 		return;
1082 	}
1083 
1084 	mode = hif_get_conparam(hif_sc);
1085 	if (mode == QDF_GLOBAL_FTM_MODE) {
1086 		hif_info("Enable power gating for FTM mode");
1087 		hif_enable_power_gating(pci_ctx);
1088 		return;
1089 	}
1090 
1091 	hif_rtpm_start(hif_sc);
1092 
1093 	if (!is_packet_log_enabled)
1094 		hif_enable_power_gating(pci_ctx);
1095 
1096 	if (!CONFIG_ATH_PCIE_MAX_PERF &&
1097 	    CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
1098 	    !ce_srng_based(hif_sc)) {
1099 		/* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
1100 		if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
1101 			hif_err("Failed to set target to sleep");
1102 	}
1103 }
1104 
1105 /**
1106  * hif_pci_disable_power_management() - disable power management
1107  * @hif_ctx: hif context
1108  *
1109  * Currently disables runtime pm. Should be updated to behave
1110  * if runtime pm is not started. Should be updated to take care
1111  * of aspm and soc sleep for driver load.
1112  */
1113 void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
1114 {
1115 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1116 
1117 	if (!pci_ctx) {
1118 		hif_err("hif_ctx null");
1119 		return;
1120 	}
1121 
1122 	hif_rtpm_stop(hif_ctx);
1123 }
1124 
1125 void hif_pci_display_stats(struct hif_softc *hif_ctx)
1126 {
1127 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1128 
1129 	if (!pci_ctx) {
1130 		hif_err("hif_ctx null");
1131 		return;
1132 	}
1133 	hif_display_ce_stats(hif_ctx);
1134 
1135 	hif_print_pci_stats(pci_ctx);
1136 }
1137 
1138 void hif_pci_clear_stats(struct hif_softc *hif_ctx)
1139 {
1140 	struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
1141 
1142 	if (!pci_ctx) {
1143 		hif_err("hif_ctx null");
1144 		return;
1145 	}
1146 	hif_clear_ce_stats(&pci_ctx->ce_sc);
1147 }
1148 
1149 #define ATH_PCI_PROBE_RETRY_MAX 3
1150 /**
1151  * hif_pci_open(): hif_bus_open
1152  * @hif_ctx: scn
1153  * @bus_type: bus type
1154  *
1155  * Return: n/a
1156  */
1157 QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
1158 {
1159 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
1160 
1161 	hif_ctx->bus_type = bus_type;
1162 	hif_rtpm_open(hif_ctx);
1163 
1164 	qdf_spinlock_create(&sc->irq_lock);
1165 	qdf_spinlock_create(&sc->force_wake_lock);
1166 
1167 	return hif_ce_open(hif_ctx);
1168 }
1169 
1170 /**
1171  * hif_wake_target_cpu() - wake the target's cpu
1172  * @scn: hif context
1173  *
1174  * Send an interrupt to the device to wake up the Target CPU
1175  * so it has an opportunity to notice any changed state.
1176  */
1177 static void hif_wake_target_cpu(struct hif_softc *scn)
1178 {
1179 	QDF_STATUS rv;
1180 	uint32_t core_ctrl;
1181 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1182 
1183 	rv = hif_diag_read_access(hif_hdl,
1184 				  SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1185 				  &core_ctrl);
1186 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1187 	/* A_INUM_FIRMWARE interrupt to Target CPU */
1188 	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1189 
1190 	rv = hif_diag_write_access(hif_hdl,
1191 				   SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
1192 				   core_ctrl);
1193 	QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
1194 }
1195 
1196 /**
1197  * soc_wake_reset() - allow the target to go to sleep
1198  * @scn: hif_softc
1199  *
1200  * Clear the force wake register.  This is done by
1201  * hif_sleep_entry and cancel deferred timer sleep.
1202  */
1203 static void soc_wake_reset(struct hif_softc *scn)
1204 {
1205 	hif_write32_mb(scn, scn->mem +
1206 		PCIE_LOCAL_BASE_ADDRESS +
1207 		PCIE_SOC_WAKE_ADDRESS,
1208 		PCIE_SOC_WAKE_RESET);
1209 }
1210 
1211 /**
1212  * hif_sleep_entry() - gate target sleep
1213  * @arg: hif context
1214  *
1215  * This function is the callback for the sleep timer.
1216  * Check if last force awake critical section was at least
1217  * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago.  if it was,
1218  * allow the target to go to sleep and cancel the sleep timer.
1219  * otherwise reschedule the sleep timer.
1220  */
1221 static void hif_sleep_entry(void *arg)
1222 {
1223 	struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
1224 	struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
1225 	uint32_t idle_ms;
1226 
1227 	if (scn->recovery)
1228 		return;
1229 
1230 	if (hif_is_driver_unloading(scn))
1231 		return;
1232 
1233 	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
1234 	if (hif_state->fake_sleep) {
1235 		idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
1236 						    - hif_state->sleep_ticks);
1237 		if (!hif_state->verified_awake &&
1238 		    idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
1239 			if (!qdf_atomic_read(&scn->link_suspended)) {
1240 				soc_wake_reset(scn);
1241 				hif_state->fake_sleep = false;
1242 			}
1243 		} else {
1244 			qdf_timer_stop(&hif_state->sleep_timer);
1245 			qdf_timer_start(&hif_state->sleep_timer,
1246 					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
1247 		}
1248 	}
1249 	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
1250 }
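
/*
 * Sketch of the fake-sleep flow above: a deferred sleep request sets
 * fake_sleep and records sleep_ticks.  When the timer fires, the target
 * is actually allowed to sleep via soc_wake_reset() only if no force-wake
 * was verified, the link is not suspended, and at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS of idle time has elapsed; if the idle
 * threshold has not been met, the timer is simply re-armed for another
 * HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS.
 */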
1251 
1252 #define HIF_HIA_MAX_POLL_LOOP    1000000
1253 #define HIF_HIA_POLLING_DELAY_MS 10
1254 
1255 #ifdef QCA_HIF_HIA_EXTND
1256 
1257 static void hif_set_hia_extnd(struct hif_softc *scn)
1258 {
1259 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1260 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1261 	uint32_t target_type = tgt_info->target_type;
1262 
1263 	hif_info("E");
1264 
1265 	if ((target_type == TARGET_TYPE_AR900B) ||
1266 			target_type == TARGET_TYPE_QCA9984 ||
1267 			target_type == TARGET_TYPE_QCA9888) {
1268 		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
1269 		 * in RTC space
1270 		 */
1271 		tgt_info->target_revision
1272 			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
1273 					+ CHIP_ID_ADDRESS));
1274 		qdf_print("chip_id 0x%x chip_revision 0x%x",
1275 			  target_type, tgt_info->target_revision);
1276 	}
1277 
1278 	{
1279 		uint32_t flag2_value = 0;
1280 		uint32_t flag2_targ_addr =
1281 			host_interest_item_address(target_type,
1282 			offsetof(struct host_interest_s, hi_skip_clock_init));
1283 
1284 		if ((ar900b_20_targ_clk != -1) &&
1285 			(frac != -1) && (intval != -1)) {
1286 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1287 				&flag2_value);
1288 			qdf_print("\n Setting clk_override");
1289 			flag2_value |= CLOCK_OVERRIDE;
1290 
1291 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1292 					flag2_value);
1293 			qdf_print("\n CLOCK PLL val set %d", flag2_value);
1294 		} else {
1295 			qdf_print("\n CLOCK PLL skipped");
1296 		}
1297 	}
1298 
1299 	if (target_type == TARGET_TYPE_AR900B
1300 			|| target_type == TARGET_TYPE_QCA9984
1301 			|| target_type == TARGET_TYPE_QCA9888) {
1302 
1303 		/* For AR9980_2.0 a 300 MHz clock is used; right now we assume
1304 		 * it is supplied through module parameters. If not supplied,
1305 		 * assume the default, i.e. the same behavior as 1.0.
1306 		 * Assume the 1.0 clock can't be tuned; reset to defaults.
1307 		 */
1308 
1309 		qdf_print(KERN_INFO
1310 			  "%s: setting the target pll frac %x intval %x",
1311 			  __func__, frac, intval);
1312 
1313 		/* do not touch frac, and int val, let them be default -1,
1314 		 * if desired, host can supply these through module params
1315 		 */
1316 		if (frac != -1 || intval != -1) {
1317 			uint32_t flag2_value = 0;
1318 			uint32_t flag2_targ_addr;
1319 
1320 			flag2_targ_addr =
1321 				host_interest_item_address(target_type,
1322 				offsetof(struct host_interest_s,
1323 					hi_clock_info));
1324 			hif_diag_read_access(hif_hdl,
1325 				flag2_targ_addr, &flag2_value);
1326 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1327 				  flag2_value);
1328 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1329 			qdf_print("\n INT Val %x  Address %x",
1330 				  intval, flag2_value + 4);
1331 			hif_diag_write_access(hif_hdl,
1332 					flag2_value + 4, intval);
1333 		} else {
1334 			qdf_print(KERN_INFO
1335 				  "%s: no frac provided, skipping pre-configuring PLL",
1336 				  __func__);
1337 		}
1338 
1339 		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
1340 		if ((target_type == TARGET_TYPE_AR900B)
1341 			&& (tgt_info->target_revision == AR900B_REV_2)
1342 			&& ar900b_20_targ_clk != -1) {
1343 			uint32_t flag2_value = 0;
1344 			uint32_t flag2_targ_addr;
1345 
1346 			flag2_targ_addr
1347 				= host_interest_item_address(target_type,
1348 					offsetof(struct host_interest_s,
1349 					hi_desired_cpu_speed_hz));
1350 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1351 							&flag2_value);
1352 			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
1353 				  flag2_value);
1354 			hif_diag_write_access(hif_hdl, flag2_value,
1355 				ar900b_20_targ_clk/*300000000u*/);
1356 		} else if (target_type == TARGET_TYPE_QCA9888) {
1357 			uint32_t flag2_targ_addr;
1358 
1359 			if (200000000u != qca9888_20_targ_clk) {
1360 				qca9888_20_targ_clk = 300000000u;
1361 				/* Setting the target clock speed to 300 mhz */
1362 			}
1363 
1364 			flag2_targ_addr
1365 				= host_interest_item_address(target_type,
1366 					offsetof(struct host_interest_s,
1367 					hi_desired_cpu_speed_hz));
1368 			hif_diag_write_access(hif_hdl, flag2_targ_addr,
1369 				qca9888_20_targ_clk);
1370 		} else {
1371 			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
1372 				  __func__);
1373 		}
1374 	} else {
1375 		if (frac != -1 || intval != -1) {
1376 			uint32_t flag2_value = 0;
1377 			uint32_t flag2_targ_addr =
1378 				host_interest_item_address(target_type,
1379 					offsetof(struct host_interest_s,
1380 							hi_clock_info));
1381 			hif_diag_read_access(hif_hdl, flag2_targ_addr,
1382 						&flag2_value);
1383 			qdf_print("\n ====> FRAC Val %x Address %x", frac,
1384 				  flag2_value);
1385 			hif_diag_write_access(hif_hdl, flag2_value, frac);
1386 			qdf_print("\n INT Val %x  Address %x", intval,
1387 				  flag2_value + 4);
1388 			hif_diag_write_access(hif_hdl, flag2_value + 4,
1389 					      intval);
1390 		}
1391 	}
1392 }
1393 
1394 #else
1395 
1396 static void hif_set_hia_extnd(struct hif_softc *scn)
1397 {
1398 }
1399 
1400 #endif
1401 
1402 /**
1403  * hif_set_hia() - fill out the host interest area
1404  * @scn: hif context
1405  *
1406  * This is replaced by hif_wlan_enable for integrated targets.
1407  * This fills out the host interest area.  The firmware will
1408  * process these memory addresses when it is first brought out
1409  * of reset.
1410  *
1411  * Return: 0 for success.
1412  */
1413 static int hif_set_hia(struct hif_softc *scn)
1414 {
1415 	QDF_STATUS rv;
1416 	uint32_t interconnect_targ_addr = 0;
1417 	uint32_t pcie_state_targ_addr = 0;
1418 	uint32_t pipe_cfg_targ_addr = 0;
1419 	uint32_t svc_to_pipe_map = 0;
1420 	uint32_t pcie_config_flags = 0;
1421 	uint32_t flag2_value = 0;
1422 	uint32_t flag2_targ_addr = 0;
1423 #ifdef QCA_WIFI_3_0
1424 	uint32_t host_interest_area = 0;
1425 	uint8_t i;
1426 #else
1427 	uint32_t ealloc_value = 0;
1428 	uint32_t ealloc_targ_addr = 0;
1429 	uint8_t banks_switched = 1;
1430 	uint32_t chip_id;
1431 #endif
1432 	uint32_t pipe_cfg_addr;
1433 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
1434 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
1435 	uint32_t target_type = tgt_info->target_type;
1436 	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
1437 	static struct CE_pipe_config *target_ce_config;
1438 	struct service_to_pipe *target_service_to_ce_map;
1439 
1440 	hif_info("E");
1441 
1442 	hif_get_target_ce_config(scn,
1443 				 &target_ce_config, &target_ce_config_sz,
1444 				 &target_service_to_ce_map,
1445 				 &target_service_to_ce_map_sz,
1446 				 NULL, NULL);
1447 
1448 	if (ADRASTEA_BU)
1449 		return 0;
1450 
1451 #ifdef QCA_WIFI_3_0
1452 	i = 0;
1453 	while (i < HIF_HIA_MAX_POLL_LOOP) {
1454 		host_interest_area = hif_read32_mb(scn, scn->mem +
1455 						A_SOC_CORE_SCRATCH_0_ADDRESS);
1456 		if ((host_interest_area & 0x01) == 0) {
1457 			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
1458 			host_interest_area = 0;
1459 			i++;
1460 			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
1461 				hif_err("poll timeout: %d", i);
1462 		} else {
1463 			host_interest_area &= (~0x01);
1464 			hif_write32_mb(scn, scn->mem + 0x113014, 0);
1465 			break;
1466 		}
1467 	}
1468 
1469 	if (i >= HIF_HIA_MAX_POLL_LOOP) {
1470 		hif_err("hia polling timeout");
1471 		return -EIO;
1472 	}
1473 
1474 	if (host_interest_area == 0) {
1475 		hif_err("host_interest_area = 0");
1476 		return -EIO;
1477 	}
1478 
1479 	interconnect_targ_addr = host_interest_area +
1480 			offsetof(struct host_interest_area_t,
1481 			hi_interconnect_state);
1482 
1483 	flag2_targ_addr = host_interest_area +
1484 			offsetof(struct host_interest_area_t, hi_option_flag2);
1485 
1486 #else
1487 	interconnect_targ_addr = hif_hia_item_address(target_type,
1488 		offsetof(struct host_interest_s, hi_interconnect_state));
1489 	ealloc_targ_addr = hif_hia_item_address(target_type,
1490 		offsetof(struct host_interest_s, hi_early_alloc));
1491 	flag2_targ_addr = hif_hia_item_address(target_type,
1492 		offsetof(struct host_interest_s, hi_option_flag2));
1493 #endif
1494 	/* Supply Target-side CE configuration */
1495 	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
1496 			  &pcie_state_targ_addr);
1497 	if (rv != QDF_STATUS_SUCCESS) {
1498 		hif_err("interconnect_targ_addr = 0x%0x, ret = %d",
1499 			interconnect_targ_addr, rv);
1500 		goto done;
1501 	}
1502 	if (pcie_state_targ_addr == 0) {
1503 		rv = QDF_STATUS_E_FAILURE;
1504 		hif_err("pcie state addr is 0");
1505 		goto done;
1506 	}
1507 	pipe_cfg_addr = pcie_state_targ_addr +
1508 			  offsetof(struct pcie_state_s,
1509 			  pipe_cfg_addr);
1510 	rv = hif_diag_read_access(hif_hdl,
1511 			  pipe_cfg_addr,
1512 			  &pipe_cfg_targ_addr);
1513 	if (rv != QDF_STATUS_SUCCESS) {
1514 		hif_err("pipe_cfg_addr = 0x%0x, ret = %d", pipe_cfg_addr, rv);
1515 		goto done;
1516 	}
1517 	if (pipe_cfg_targ_addr == 0) {
1518 		rv = QDF_STATUS_E_FAILURE;
1519 		hif_err("pipe cfg addr is 0");
1520 		goto done;
1521 	}
1522 
1523 	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
1524 			(uint8_t *) target_ce_config,
1525 			target_ce_config_sz);
1526 
1527 	if (rv != QDF_STATUS_SUCCESS) {
1528 		hif_err("write pipe cfg: %d", rv);
1529 		goto done;
1530 	}
1531 
1532 	rv = hif_diag_read_access(hif_hdl,
1533 			  pcie_state_targ_addr +
1534 			  offsetof(struct pcie_state_s,
1535 			   svc_to_pipe_map),
1536 			  &svc_to_pipe_map);
1537 	if (rv != QDF_STATUS_SUCCESS) {
1538 		hif_err("get svc/pipe map: %d", rv);
1539 		goto done;
1540 	}
1541 	if (svc_to_pipe_map == 0) {
1542 		rv = QDF_STATUS_E_FAILURE;
1543 		hif_err("svc_to_pipe map is 0");
1544 		goto done;
1545 	}
1546 
1547 	rv = hif_diag_write_mem(hif_hdl,
1548 			svc_to_pipe_map,
1549 			(uint8_t *) target_service_to_ce_map,
1550 			target_service_to_ce_map_sz);
1551 	if (rv != QDF_STATUS_SUCCESS) {
1552 		hif_err("write svc/pipe map: %d", rv);
1553 		goto done;
1554 	}
1555 
1556 	rv = hif_diag_read_access(hif_hdl,
1557 			pcie_state_targ_addr +
1558 			offsetof(struct pcie_state_s,
1559 			config_flags),
1560 			&pcie_config_flags);
1561 	if (rv != QDF_STATUS_SUCCESS) {
1562 		hif_err("get pcie config_flags: %d", rv);
1563 		goto done;
1564 	}
1565 #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
1566 	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
1567 #else
1568 	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1569 #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
1570 	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
1571 #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
1572 	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
1573 #endif
1574 	rv = hif_diag_write_mem(hif_hdl,
1575 			pcie_state_targ_addr +
1576 			offsetof(struct pcie_state_s,
1577 			config_flags),
1578 			(uint8_t *) &pcie_config_flags,
1579 			sizeof(pcie_config_flags));
1580 	if (rv != QDF_STATUS_SUCCESS) {
1581 		hif_err("write pcie config_flags: %d", rv);
1582 		goto done;
1583 	}
1584 
1585 #ifndef QCA_WIFI_3_0
1586 	/* configure early allocation */
1587 	ealloc_targ_addr = hif_hia_item_address(target_type,
1588 						offsetof(
1589 						struct host_interest_s,
1590 						hi_early_alloc));
1591 
1592 	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
1593 			&ealloc_value);
1594 	if (rv != QDF_STATUS_SUCCESS) {
1595 		hif_err("get early alloc val: %d", rv);
1596 		goto done;
1597 	}
1598 
1599 	/* 1 bank is switched to IRAM, except ROME 1.0 */
1600 	ealloc_value |=
1601 		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1602 		 HI_EARLY_ALLOC_MAGIC_MASK);
1603 
1604 	rv = hif_diag_read_access(hif_hdl,
1605 			  CHIP_ID_ADDRESS |
1606 			  RTC_SOC_BASE_ADDRESS, &chip_id);
1607 	if (rv != QDF_STATUS_SUCCESS) {
1608 		hif_err("get chip id val: %d", rv);
1609 		goto done;
1610 	}
1611 	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
1612 		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
1613 		switch (CHIP_ID_REVISION_GET(chip_id)) {
1614 		case 0x2:       /* ROME 1.3 */
1615 			/* 2 banks are switched to IRAM */
1616 			banks_switched = 2;
1617 			break;
1618 		case 0x4:       /* ROME 2.1 */
1619 		case 0x5:       /* ROME 2.2 */
1620 			banks_switched = 6;
1621 			break;
1622 		case 0x8:       /* ROME 3.0 */
1623 		case 0x9:       /* ROME 3.1 */
1624 		case 0xA:       /* ROME 3.2 */
1625 			banks_switched = 9;
1626 			break;
1627 		case 0x0:       /* ROME 1.0 */
1628 		case 0x1:       /* ROME 1.1 */
1629 		default:
1630 			/* 3 banks are switched to IRAM */
1631 			banks_switched = 3;
1632 			break;
1633 		}
1634 	}
1635 
1636 	ealloc_value |=
1637 		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
1638 		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1639 
1640 	rv = hif_diag_write_access(hif_hdl,
1641 				ealloc_targ_addr,
1642 				ealloc_value);
1643 	if (rv != QDF_STATUS_SUCCESS) {
1644 		hif_err("set early alloc val: %d", rv);
1645 		goto done;
1646 	}
1647 #endif
1648 	if ((target_type == TARGET_TYPE_AR900B)
1649 			|| (target_type == TARGET_TYPE_QCA9984)
1650 			|| (target_type == TARGET_TYPE_QCA9888)
1651 			|| (target_type == TARGET_TYPE_AR9888)) {
1652 		hif_set_hia_extnd(scn);
1653 	}
1654 
1655 	/* Tell Target to proceed with initialization */
1656 	flag2_targ_addr = hif_hia_item_address(target_type,
1657 						offsetof(
1658 						struct host_interest_s,
1659 						hi_option_flag2));
1660 
1661 	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
1662 			  &flag2_value);
1663 	if (rv != QDF_STATUS_SUCCESS) {
1664 		hif_err("get option val: %d", rv);
1665 		goto done;
1666 	}
1667 
1668 	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1669 	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
1670 			   flag2_value);
1671 	if (rv != QDF_STATUS_SUCCESS) {
1672 		hif_err("set option val: %d", rv);
1673 		goto done;
1674 	}
1675 
1676 	hif_wake_target_cpu(scn);
1677 
1678 done:
1679 
1680 	return qdf_status_to_os_return(rv);
1681 }
1682 
1683 /**
1684  * hif_pci_bus_configure() - configure the pcie bus
1685  * @hif_sc: pointer to the hif context.
1686  *
1687  * return: 0 for success. nonzero for failure.
1688  */
1689 int hif_pci_bus_configure(struct hif_softc *hif_sc)
1690 {
1691 	int status = 0;
1692 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
1693 	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
1694 
1695 	hif_ce_prepare_config(hif_sc);
1696 
1697 	/* initialize sleep state adjust variables */
1698 	hif_state->sleep_timer_init = true;
1699 	hif_state->keep_awake_count = 0;
1700 	hif_state->fake_sleep = false;
1701 	hif_state->sleep_ticks = 0;
1702 
1703 	qdf_timer_init(NULL, &hif_state->sleep_timer,
1704 			       hif_sleep_entry, (void *)hif_state,
1705 			       QDF_TIMER_TYPE_WAKE_APPS);
1706 	hif_state->sleep_timer_init = true;
1707 
1708 	status = hif_wlan_enable(hif_sc);
1709 	if (status) {
1710 		hif_err("hif_wlan_enable error: %d", status);
1711 		goto timer_free;
1712 	}
1713 
1714 	A_TARGET_ACCESS_LIKELY(hif_sc);
1715 
1716 	if ((CONFIG_ATH_PCIE_MAX_PERF ||
1717 	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
1718 	    !ce_srng_based(hif_sc)) {
1719 		/*
1720 		 * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
1721 		 * prevent sleep when we want to keep firmware always awake
1722 		 * note: when we want to keep firmware always awake,
1723 		 *       hif_target_sleep_state_adjust will point to a dummy
1724 		 *       function, and hif_pci_target_sleep_state_adjust must
1725 		 *       be called instead.
1726 		 * note: bus type check is here because AHB bus is reusing
1727 		 *       hif_pci_bus_configure code.
1728 		 */
1729 		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
1730 			if (hif_pci_target_sleep_state_adjust(hif_sc,
1731 					false, true) < 0) {
1732 				status = -EACCES;
1733 				goto disable_wlan;
1734 			}
1735 		}
1736 	}
1737 
1738 	/* todo: consider replacing this with an srng field */
1739 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1740 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1741 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
1742 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
1743 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1744 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
1745 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
1746 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018) ||
1747 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6432)) &&
1748 	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
1749 		hif_sc->per_ce_irq = true;
1750 	}
1751 
1752 	status = hif_config_ce(hif_sc);
1753 	if (status)
1754 		goto disable_wlan;
1755 
1756 	if (hif_needs_bmi(hif_osc)) {
1757 		status = hif_set_hia(hif_sc);
1758 		if (status)
1759 			goto unconfig_ce;
1760 
1761 		hif_debug("hif_set_hia done");
1762 
1763 	}
1764 
1765 	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
1766 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
1767 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
1768 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
1769 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
1770 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
1771 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
1772 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018) ||
1773 	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6432)) &&
1774 	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
1775 		hif_debug("Skip irq config for PCI based 8074 target");
1776 	else {
1777 		status = hif_configure_irq(hif_sc);
1778 		if (status < 0)
1779 			goto unconfig_ce;
1780 	}
1781 
1782 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1783 
1784 	return status;
1785 
1786 unconfig_ce:
1787 	hif_unconfig_ce(hif_sc);
1788 disable_wlan:
1789 	A_TARGET_ACCESS_UNLIKELY(hif_sc);
1790 	hif_wlan_disable(hif_sc);
1791 
1792 timer_free:
1793 	qdf_timer_stop(&hif_state->sleep_timer);
1794 	qdf_timer_free(&hif_state->sleep_timer);
1795 	hif_state->sleep_timer_init = false;
1796 
1797 	hif_err("Failed, status: %d", status);
1798 	return status;
1799 }
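/*
 * Note on the unwind order in hif_pci_bus_configure(): the error labels
 * release resources in reverse order of acquisition, so a failure at any
 * step only tears down what was already set up. A hif_config_ce() failure,
 * for example, jumps to disable_wlan and still reaches timer_free through
 * the fall-through, freeing the sleep timer initialized at the top.
 */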
1800 
1801 /**
1802  * hif_pci_close(): hif_bus_close
1803  * @hif_sc: HIF context
1804  *
1805  * Return: n/a
1806  */
1807 void hif_pci_close(struct hif_softc *hif_sc)
1808 {
1809 	hif_rtpm_close(hif_sc);
1810 	hif_ce_close(hif_sc);
1811 }
1812 
1813 #define BAR_NUM 0
1814 
1815 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
1816 static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
1817 {
1818 	return dma_set_mask(&pci_dev->dev, mask);
1819 }
1820 
1821 static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
1822 						u64 mask)
1823 {
1824 	return dma_set_coherent_mask(&pci_dev->dev, mask);
1825 }
1826 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
1827 static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
1828 {
1829 	return pci_set_dma_mask(pci_dev, mask);
1830 }
1831 
1832 static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
1833 						u64 mask)
1834 {
1835 	return pci_set_consistent_dma_mask(pci_dev, mask);
1836 }
1837 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
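/*
 * A minimal sketch of how the wrappers above are used in a probe path
 * (illustrative only; the real call sites are in hif_enable_pci_nopld()
 * below):
 *
 *	if (hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
 *	    hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * Both masks must be applied together, since streaming and coherent DMA
 * use separate masks on Linux.
 */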
1838 
1839 static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
1840 				struct pci_dev *pdev,
1841 				const struct pci_device_id *id)
1842 {
1843 	void __iomem *mem;
1844 	int ret = 0;
1845 	uint16_t device_id = 0;
1846 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1847 
1848 	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
1849 	if (device_id != id->device)  {
1850 		hif_err(
1851 		   "dev id mismatch, config id = 0x%x, probing id = 0x%x",
1852 		   device_id, id->device);
1853 		/* pci link is down, so returning with error code */
1854 		return -EIO;
1855 	}
1856 
1857 	/* FIXME: temp. commenting out assign_resource
1858 	 * call for dev_attach to work on 2.6.38 kernel
1859 	 */
1860 #if (!defined(__LINUX_ARM_ARCH__))
1861 	if (pci_assign_resource(pdev, BAR_NUM)) {
1862 		hif_err("pci_assign_resource error");
1863 		return -EIO;
1864 	}
1865 #endif
1866 	if (pci_enable_device(pdev)) {
1867 		hif_err("pci_enable_device error");
1868 		return -EIO;
1869 	}
1870 
1871 	/* Request MMIO resources */
1872 #ifdef CONFIG_PCI
1873 	ret = pci_request_region(pdev, BAR_NUM, "ath");
1874 	if (ret) {
1875 		hif_err("PCI MMIO reservation error");
1876 		ret = -EIO;
1877 		goto err_region;
1878 	}
1879 #endif
1880 #ifdef CONFIG_ARM_LPAE
1881 	/* if CONFIG_ARM_LPAE is enabled, we must set the 64-bit DMA mask
1882 	 * even for 32-bit devices.
1883 	 */
1884 	ret =  hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1885 	if (ret) {
1886 		hif_err("Cannot enable 64-bit pci DMA");
1887 		goto err_dma;
1888 	}
1889 	ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(64));
1890 	if (ret) {
1891 		hif_err("Cannot enable 64-bit DMA");
1892 		goto err_dma;
1893 	}
1894 #else
1895 	ret = hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1896 	if (ret) {
1897 		hif_err("Cannot enable 32-bit pci DMA");
1898 		goto err_dma;
1899 	}
1900 	ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(32));
1901 	if (ret) {
1902 		hif_err("Cannot enable 32-bit coherent DMA!");
1903 		goto err_dma;
1904 	}
1905 #endif
1906 
1907 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1908 
1909 	/* Set bus master bit in PCI_COMMAND to enable DMA */
1910 	pci_set_master(pdev);
1911 
1912 	/* Arrange for access to Target SoC registers. */
1913 	mem = pci_iomap(pdev, BAR_NUM, 0);
1914 	if (!mem) {
1915 		hif_err("PCI iomap error");
1916 		ret = -EIO;
1917 		goto err_iomap;
1918 	}
1919 
1920 	hif_info("*****BAR is %pK", (void *)mem);
1921 
1922 	sc->mem = mem;
1923 
1924 	/* Hawkeye emulation specific change */
1925 	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
1926 		(device_id == RUMIM2M_DEVICE_ID_NODE1) ||
1927 		(device_id == RUMIM2M_DEVICE_ID_NODE2) ||
1928 		(device_id == RUMIM2M_DEVICE_ID_NODE3) ||
1929 		(device_id == RUMIM2M_DEVICE_ID_NODE4) ||
1930 		(device_id == RUMIM2M_DEVICE_ID_NODE5)) {
1931 		mem = mem + 0x0c000000;
1932 		sc->mem = mem;
1933 		hif_info("Changing PCI mem base to %pK", sc->mem);
1934 	}
1935 
1936 	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
1937 	ol_sc->mem = mem;
1938 	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
1939 	sc->pci_enabled = true;
1940 	return ret;
1941 
1942 err_iomap:
1943 #ifdef CONFIG_PCI
1944 	pci_clear_master(pdev);
1945 #endif
1946 err_dma:
1947 #ifdef CONFIG_PCI
1948 	pci_release_region(pdev, BAR_NUM);
1949 err_region:
1950 #endif
1951 	pci_disable_device(pdev);
1952 	return ret;
1953 }
1954 
1955 static int hif_enable_pci_pld(struct hif_pci_softc *sc,
1956 			      struct pci_dev *pdev,
1957 			      const struct pci_device_id *id)
1958 {
1959 	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
1960 	sc->pci_enabled = true;
1961 	return 0;
1962 }
1963 
1964 
1965 static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
1966 {
1967 #ifdef CONFIG_PCI
1968 	pci_disable_msi(sc->pdev);
1969 	pci_iounmap(sc->pdev, sc->mem);
1970 	pci_clear_master(sc->pdev);
1971 	pci_release_region(sc->pdev, BAR_NUM);
1972 	pci_disable_device(sc->pdev);
1973 #endif
1974 	return;
1975 }
1976 
1977 static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
1978 
1979 static void hif_disable_pci(struct hif_pci_softc *sc)
1980 {
1981 	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
1982 
1983 	if (!ol_sc) {
1984 		hif_err("ol_sc = NULL");
1985 		return;
1986 	}
1987 	hif_pci_device_reset(sc);
1988 	sc->hif_pci_deinit(sc);
1989 
1990 	sc->mem = NULL;
1991 	ol_sc->mem = NULL;
1992 }
1993 
1994 static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
1995 {
1996 	int ret = 0;
1997 	int targ_awake_limit = 500;
1998 #ifndef QCA_WIFI_3_0
1999 	uint32_t fw_indicator;
2000 #endif
2001 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2002 
2003 	/*
2004 	 * Verify that the Target was started cleanly.
2005 	 * The case where this is most likely is with an AUX-powered
2006 	 * Target and a Host in WoW mode. If the Host crashes,
2007 	 * loses power, or is restarted (without unloading the driver)
2008 	 * then the Target is left (aux) powered and running.  On a
2009 	 * subsequent driver load, the Target is in an unexpected state.
2010 	 * We try to catch that here in order to reset the Target and
2011 	 * retry the probe.
2012 	 */
2013 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2014 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2015 	while (!hif_targ_is_awake(scn, sc->mem)) {
2016 		if (0 == targ_awake_limit) {
2017 			hif_err("target awake timeout");
2018 			ret = -EAGAIN;
2019 			goto end;
2020 		}
2021 		qdf_mdelay(1);
2022 		targ_awake_limit--;
2023 	}
2024 
2025 #if PCIE_BAR0_READY_CHECKING
2026 	{
2027 		int wait_limit = 200;
2028 		/* Synchronization point: wait until BAR0 is configured */
2029 		while (wait_limit-- &&
2030 			   !(hif_read32_mb(sc, sc->mem +
2031 					  PCIE_LOCAL_BASE_ADDRESS +
2032 					  PCIE_SOC_RDY_STATUS_ADDRESS)
2033 					  & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
2034 			qdf_mdelay(10);
2035 		}
2036 		if (wait_limit < 0) {
2037 			/* AR6320v1 doesn't support checking of BAR0
2038 			 * configuration; it takes about two seconds for BAR0 to be ready
2039 			 */
2040 			hif_debug("AR6320v1 waits two sec for BAR0");
2041 		}
2042 	}
2043 #endif
2044 
2045 #ifndef QCA_WIFI_3_0
2046 	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
2047 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2048 				  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2049 
2050 	if (fw_indicator & FW_IND_INITIALIZED) {
2051 		hif_err("Target is in an unknown state. EAGAIN");
2052 		ret = -EAGAIN;
2053 		goto end;
2054 	}
2055 #endif
2056 
2057 end:
2058 	return ret;
2059 }
2060 
2061 static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
2062 {
2063 	int ret = 0;
2064 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2065 	uint32_t target_type = scn->target_info.target_type;
2066 
2067 	hif_info("E");
2068 
2069 	/* target does not support MSI, or the MSI IRQ request failed */
2070 	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
2071 	ret = request_irq(sc->pdev->irq,
2072 			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
2073 			  "wlan_pci", sc);
2074 	if (ret) {
2075 		hif_err("request_irq failed, ret: %d", ret);
2076 		goto end;
2077 	}
2078 	scn->wake_irq = sc->pdev->irq;
2079 	/* Use sc->irq instead of sc->pdev->irq;
2080 	 * platform_device pdev doesn't have an irq field
2081 	 */
2082 	sc->irq = sc->pdev->irq;
2083 	/* Use Legacy PCI Interrupts */
2084 	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2085 		  PCIE_INTR_ENABLE_ADDRESS),
2086 		  HOST_GROUP0_MASK);
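	/* read back, presumably to flush the posted enable write */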
2087 	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
2088 			       PCIE_INTR_ENABLE_ADDRESS));
2089 	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
2090 		      PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2091 
2092 	if ((target_type == TARGET_TYPE_AR900B)  ||
2093 			(target_type == TARGET_TYPE_QCA9984) ||
2094 			(target_type == TARGET_TYPE_AR9888) ||
2095 			(target_type == TARGET_TYPE_QCA9888) ||
2096 			(target_type == TARGET_TYPE_AR6320V1) ||
2097 			(target_type == TARGET_TYPE_AR6320V2) ||
2098 			(target_type == TARGET_TYPE_AR6320V3)) {
2099 		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
2100 				PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
2101 	}
2102 end:
2103 	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
2104 			  "%s: X, ret = %d", __func__, ret);
2105 	return ret;
2106 }
2107 
2108 static int hif_ce_srng_free_irq(struct hif_softc *scn)
2109 {
2110 	int ret = 0;
2111 	int ce_id, irq;
2112 	uint32_t msi_data_start;
2113 	uint32_t msi_data_count;
2114 	uint32_t msi_irq_start;
2115 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2116 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2117 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2118 
2119 	if (!pld_get_enable_intx(scn->qdf_dev->dev)) {
2120 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
2121 						  &msi_data_count,
2122 						  &msi_data_start,
2123 						  &msi_irq_start);
2124 		if (ret)
2125 			return ret;
2126 	}
2127 
2128 	/* needs to match the ce_id -> irq data mapping
2129 	 * used in the srng parameter configuration
2130 	 */
2131 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2132 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2133 			continue;
2134 
2135 		if (!ce_sc->tasklets[ce_id].inited)
2136 			continue;
2137 
2138 		irq = sc->ce_irq_num[ce_id];
2139 
2140 		hif_irq_affinity_remove(irq);
2141 
2142 		hif_debug("%s: (ce_id %d, irq %d)", __func__, ce_id, irq);
2143 
2144 		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
2145 	}
2146 
2147 	return ret;
2148 }
2149 
2150 void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
2151 {
2152 	int i, j, irq;
2153 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2154 	struct hif_exec_context *hif_ext_group;
2155 
2156 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
2157 		hif_ext_group = hif_state->hif_ext_group[i];
2158 		if (hif_ext_group->irq_requested) {
2159 			hif_ext_group->irq_requested = false;
2160 			for (j = 0; j < hif_ext_group->numirq; j++) {
2161 				irq = hif_ext_group->os_irq[j];
2162 				if (scn->irq_unlazy_disable) {
2163 					qdf_dev_clear_irq_status_flags(
2164 							irq,
2165 							QDF_IRQ_DISABLE_UNLAZY);
2166 				}
2167 				hif_irq_affinity_remove(irq);
2168 				pfrm_free_irq(scn->qdf_dev->dev,
2169 					      irq, hif_ext_group);
2170 			}
2171 			hif_ext_group->numirq = 0;
2172 		}
2173 	}
2174 }
2175 
2176 /**
2177  * hif_pci_nointrs(): disable IRQ
2178  * @scn: struct hif_softc
2179  *
2180  * This function stops interrupt(s)
2181  *
2182  * Return: none
2183  */
2184 void hif_pci_nointrs(struct hif_softc *scn)
2185 {
2186 	int i, ret;
2187 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2188 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2189 
2190 	scn->free_irq_done = true;
2191 	ce_unregister_irq(hif_state, CE_ALL_BITMAP);
2192 
2193 	if (scn->request_irq_done == false)
2194 		return;
2195 
2196 	hif_pci_deconfigure_grp_irq(scn);
2197 
2198 	ret = hif_ce_srng_free_irq(scn);
2199 	if (ret != -EINVAL) {
2200 		/* ce irqs freed in hif_ce_srng_free_irq */
2201 
2202 		if (scn->wake_irq)
2203 			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
2204 		scn->wake_irq = 0;
2205 	} else if (sc->num_msi_intrs > 0) {
2206 		/* MSI interrupt(s) */
2207 		for (i = 0; i < sc->num_msi_intrs; i++)
2208 			free_irq(sc->irq + i, sc);
2209 		sc->num_msi_intrs = 0;
2210 	} else {
2211 		/* Legacy PCI line interrupt
2212 		 * Use sc->irq instead of sc->pdev->irq;
2213 		 * platform_device pdev doesn't have an irq field
2214 		 */
2215 		free_irq(sc->irq, sc);
2216 	}
2217 	scn->request_irq_done = false;
2218 }
2219 
2220 static inline
2221 bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
2222 {
2223 	if (ADRASTEA_BU && (tgt_info->target_type != TARGET_TYPE_QCN7605))
2224 		return true;
2225 	else
2226 		return false;
2227 }
2228 /**
2229  * hif_pci_disable_bus(): hif_disable_bus
2230  * @scn: hif context
2231  *
2232  * This function disables the bus
2233  *
2234  * Return: none
2235  */
2236 void hif_pci_disable_bus(struct hif_softc *scn)
2237 {
2238 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2239 	struct pci_dev *pdev;
2240 	void __iomem *mem;
2241 	struct hif_target_info *tgt_info = &scn->target_info;
2242 
2243 	/* Attach did not succeed, all resources have been
2244 	 * freed in error handler
2245 	 */
2246 	if (!sc)
2247 		return;
2248 
2249 	pdev = sc->pdev;
2250 	if (hif_pci_default_link_up(tgt_info)) {
2251 		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
2252 
2253 		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
2254 		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
2255 			       HOST_GROUP0_MASK);
2256 	}
2257 
2258 #if defined(CPU_WARM_RESET_WAR)
2259 	/* Currently the CPU warm reset sequence is tested only for
2260 	 * AR9888_REV2. It needs to be enabled for AR9888_REV1 once the
2261 	 * CPU warm reset sequence is verified for AR9888_REV1.
2262 	 */
2263 	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
2264 	    (tgt_info->target_version == AR9887_REV1_VERSION))
2265 		hif_pci_device_warm_reset(sc);
2266 	else
2267 		hif_pci_device_reset(sc);
2268 #else
2269 	hif_pci_device_reset(sc);
2270 #endif
2271 	mem = (void __iomem *)sc->mem;
2272 	if (mem) {
2273 		hif_dump_pipe_debug_count(scn);
2274 		if (scn->athdiag_procfs_inited) {
2275 			athdiag_procfs_remove();
2276 			scn->athdiag_procfs_inited = false;
2277 		}
2278 		sc->hif_pci_deinit(sc);
2279 		scn->mem = NULL;
2280 	}
2281 	hif_info("X");
2282 }
2283 
2284 #define OL_ATH_PCI_PM_CONTROL 0x44
2285 
2286 #ifdef CONFIG_PLD_PCIE_CNSS
2287 /**
2288  * hif_pci_prevent_linkdown(): prevent or allow linkdown
2289  * @scn: hif context
2290  * @flag: true prevents linkdown, false allows
2291  *
2292  * Calls into the platform driver to vote against taking down the
2293  * pcie link.
2294  *
2295  * Return: n/a
2296  */
2297 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2298 {
2299 	int errno;
2300 
2301 	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
2302 	hif_runtime_prevent_linkdown(scn, flag);
2303 
2304 	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
2305 	if (errno)
2306 		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
2307 }
2308 #else
2309 void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
2310 {
2311 }
2312 #endif
2313 
2314 #ifdef CONFIG_PCI_LOW_POWER_INT_REG
2315 /**
2316  * hif_pci_config_low_power_int_register() - configure pci low power
2317  *                                           interrupt register.
2318  * @scn: hif context
2319  * @enable: true to enable the bits, false clear.
2320  *
2321  * Configure the bits INTR_L1SS and INTR_CLKPM of
2322  * PCIE_LOW_POWER_INT_MASK register.
2323  *
2324  * Return: n/a
2325  */
2326 static void hif_pci_config_low_power_int_register(struct hif_softc *scn,
2327 						  bool enable)
2328 {
2329 	void *address;
2330 	uint32_t value;
2331 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
2332 	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
2333 	uint32_t target_type = tgt_info->target_type;
2334 
2335 	/*
2336 	 * Only configure the bits INTR_L1SS and INTR_CLKPM of
2337 	 * PCIE_LOW_POWER_INT_MASK register for QCA6174 to work around a
2338 	 * high power consumption issue: NFA344A draws more than 80mA after
2339 	 * entering Modern Standby, and the power only drops back to normal
2340 	 * after PERST# de-assert.
2341 	 */
2342 	if ((target_type == TARGET_TYPE_AR6320) ||
2343 	    (target_type == TARGET_TYPE_AR6320V1) ||
2344 	    (target_type == TARGET_TYPE_AR6320V2) ||
2345 	    (target_type == TARGET_TYPE_AR6320V3)) {
2346 		hif_info("Configure PCI low power int mask register");
2347 
2348 		address = scn->mem + PCIE_LOW_POWER_INT_MASK_OFFSET;
2349 
2350 		/* Configure bit3 INTR_L1SS */
2351 		value = hif_read32_mb(scn, address);
2352 		if (enable)
2353 			value |= INTR_L1SS;
2354 		else
2355 			value &= ~INTR_L1SS;
2356 		hif_write32_mb(scn, address, value);
2357 
2358 		/* Configure bit4 INTR_CLKPM */
2359 		value = hif_read32_mb(scn, address);
2360 		if (enable)
2361 			value |= INTR_CLKPM;
2362 		else
2363 			value &= ~INTR_CLKPM;
2364 		hif_write32_mb(scn, address, value);
2365 	}
2366 }
2367 #else
2368 static inline void hif_pci_config_low_power_int_register(struct hif_softc *scn,
2369 							 bool enable)
2370 {
2371 }
2372 #endif
2373 
2374 /**
2375  * hif_pci_bus_suspend(): prepare hif for suspend
2376  * @scn: hif context
2377  *
2378  * Return: Errno
2379  */
2380 int hif_pci_bus_suspend(struct hif_softc *scn)
2381 {
2382 	QDF_STATUS ret;
2383 
2384 	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
2385 
2386 	/*
2387 	 * In the unlikely case that draining turns into an infinite loop,
2388 	 * hif_drain_fw_diag_ce() returns an error and the drain is abandoned.
2389 	 */
2390 	ret = hif_drain_fw_diag_ce(scn);
2391 	if (ret)
2392 		hif_err("draining fw_diag_ce not got cleaned");
2393 
2394 	ret = hif_try_complete_tasks(scn);
2395 	if (QDF_IS_STATUS_ERROR(ret)) {
2396 		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2397 		return -EBUSY;
2398 	}
2399 
2400 	/* Stop the HIF Sleep Timer */
2401 	hif_cancel_deferred_target_sleep(scn);
2402 
2403 	/*
2404 	 * Only the INTR_L1SS/INTR_CLKPM bits need to be cleared for suspend;
2405 	 * there is no need to set them again on resume, as firmware will
2406 	 * restore them.
2407 	 */
2408 	hif_pci_config_low_power_int_register(scn, false);
2409 
2410 	scn->bus_suspended = true;
2411 
2412 	return 0;
2413 }
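/*
 * The resume path (hif_pci_bus_resume() below) mirrors this sequence:
 * it first verifies the PCIe link via __hif_check_link_status() and only
 * then re-enables the interrupts that were disabled here.
 */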
2414 
2415 #ifdef PCI_LINK_STATUS_SANITY
2416 /**
2417  * __hif_check_link_status() - API to check whether the PCIe link is active
2418  * @scn: HIF Context
2419  *
2420  * API reads the PCIe config space to verify if PCIe link training is
2421  * successful or not.
2422  *
2423  * Return: Success/Failure
2424  */
2425 static int __hif_check_link_status(struct hif_softc *scn)
2426 {
2427 	uint16_t dev_id = 0;
2428 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2429 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2430 
2431 	if (!sc) {
2432 		hif_err("HIF Bus Context is Invalid");
2433 		return -EINVAL;
2434 	}
2435 
2436 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
2437 
2438 	if (dev_id == sc->devid)
2439 		return 0;
2440 
2441 	hif_err("Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
2442 	       dev_id);
2443 
2444 	scn->recovery = true;
2445 
2446 	if (cbk && cbk->set_recovery_in_progress)
2447 		cbk->set_recovery_in_progress(cbk->context, true);
2448 	else
2449 		hif_err("Driver Global Recovery is not set");
2450 
2451 	pld_is_pci_link_down(sc->dev);
2452 	return -EACCES;
2453 }
2454 #else
2455 static inline int __hif_check_link_status(struct hif_softc *scn)
2456 {
2457 	return 0;
2458 }
2459 #endif
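/*
 * Reading PCI_DEVICE_ID works as a cheap link probe here: config reads
 * on a failed PCIe link complete with all-ones data, so a value that no
 * longer matches the cached sc->devid (typically 0xffff) indicates the
 * link has gone down.
 */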
2460 
2461 
2462 #ifdef HIF_BUS_LOG_INFO
2463 bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data,
2464 		       unsigned int *offset)
2465 {
2466 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2467 	struct hang_event_bus_info info = {0};
2468 	size_t size;
2469 
2470 	if (!sc) {
2471 		hif_err("HIF Bus Context is Invalid");
2472 		return false;
2473 	}
2474 
2475 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &info.dev_id);
2476 
2477 	size = sizeof(info);
2478 	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_BUS_INFO,
2479 			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);
2480 
2481 	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
2482 		return false;
2483 
2484 	qdf_mem_copy(data + *offset, &info, size);
2485 	*offset = *offset + size;
2486 
2487 	if (info.dev_id == sc->devid)
2488 		return false;
2489 
2490 	qdf_recovery_reason_update(QCA_HANG_BUS_FAILURE);
2491 	qdf_get_bus_reg_dump(scn->qdf_dev->dev, data,
2492 			     (QDF_WLAN_HANG_FW_OFFSET - size));
2493 	return true;
2494 }
2495 #endif
2496 
2497 /**
2498  * hif_pci_bus_resume(): prepare hif for resume
2499  * @scn: hif context
2500  *
2501  * Return: Errno
2502  */
2503 int hif_pci_bus_resume(struct hif_softc *scn)
2504 {
2505 	int errno;
2506 
2507 	scn->bus_suspended = false;
2508 
2509 	errno = __hif_check_link_status(scn);
2510 	if (errno)
2511 		return errno;
2512 
2513 	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
2514 
2515 	return 0;
2516 }
2517 
2518 /**
2519  * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
2520  * @scn: hif context
2521  *
2522  * Ensure that if we received the wakeup message before the irq
2523  * was disabled that the message is processed before suspending.
2524  *
2525  * Return: -EBUSY if we fail to flush the tasklets.
2526  */
2527 int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
2528 {
2529 	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
2530 		qdf_atomic_set(&scn->link_suspended, 1);
2531 
2532 	return 0;
2533 }
2534 
2535 /**
2536  * hif_pci_bus_resume_noirq() - clear the link-suspended state
2537  * @scn: hif context
2538  *
2539  * Clear the link suspend flag; by the time the noirq resume callback
2540  * runs, the PCIe link has already been brought back up.
2541  *
2542  * Return: 0
2543  */
2544 int hif_pci_bus_resume_noirq(struct hif_softc *scn)
2545 {
2546 	/* a vote for link up can come in the middle of the ongoing resume
2547 	 * process. hence, clear the link suspend flag once
2548 	 * hif_bus_resume_noirq() succeeds since PCIe link is already resumed
2549 	 * by this time
2550 	 */
2551 	qdf_atomic_set(&scn->link_suspended, 0);
2552 
2553 	return 0;
2554 }
2555 
2556 #if CONFIG_PCIE_64BIT_MSI
2557 static void hif_free_msi_ctx(struct hif_softc *scn)
2558 {
2559 	struct hif_pci_softc *sc = scn->hif_sc;
2560 	struct hif_msi_info *info = &sc->msi_info;
2561 	struct device *dev = scn->qdf_dev->dev;
2562 
2563 	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
2564 			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
2565 	info->magic = NULL;
2566 	info->magic_dma = 0;
2567 }
2568 #else
2569 static void hif_free_msi_ctx(struct hif_softc *scn)
2570 {
2571 }
2572 #endif
2573 
2574 void hif_pci_disable_isr(struct hif_softc *scn)
2575 {
2576 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2577 
2578 	hif_exec_kill(&scn->osc);
2579 	hif_nointrs(scn);
2580 	hif_free_msi_ctx(scn);
2581 	/* Cancel the pending tasklet */
2582 	ce_tasklet_kill(scn);
2583 	tasklet_kill(&sc->intr_tq);
2584 	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
2585 	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
2586 }
2587 
2588 /* Function to reset SoC */
2589 void hif_pci_reset_soc(struct hif_softc *hif_sc)
2590 {
2591 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
2592 	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
2593 	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
2594 
2595 #if defined(CPU_WARM_RESET_WAR)
2596 	/* Currently the CPU warm reset sequence is tested only for
2597 	 * AR9888_REV2. It needs to be enabled for AR9888_REV1 once the
2598 	 * CPU warm reset sequence is verified for AR9888_REV1.
2599 	 */
2600 	if (tgt_info->target_version == AR9888_REV2_VERSION)
2601 		hif_pci_device_warm_reset(sc);
2602 	else
2603 		hif_pci_device_reset(sc);
2604 #else
2605 	hif_pci_device_reset(sc);
2606 #endif
2607 }
2608 
2609 /**
2610  * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
2611  * @sc: HIF PCIe Context
2612  *
2613  * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
2614  *
2615  * Return: Failure to caller
2616  */
2617 static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
2618 {
2619 	uint16_t val = 0;
2620 	uint32_t bar = 0;
2621 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
2622 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
2623 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
2624 	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
2625 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2626 	A_target_id_t pci_addr = scn->mem;
2627 
2628 	hif_info("keep_awake_count = %d", hif_state->keep_awake_count);
2629 
2630 	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
2631 
2632 	hif_info("PCI Vendor ID = 0x%04x", val);
2633 
2634 	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
2635 
2636 	hif_info("PCI Device ID = 0x%04x", val);
2637 
2638 	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
2639 
2640 	hif_info("PCI Command = 0x%04x", val);
2641 
2642 	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
2643 
2644 	hif_info("PCI Status = 0x%04x", val);
2645 
2646 	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
2647 
2648 	hif_info("PCI BAR 0 = 0x%08x", bar);
2649 
2650 	hif_info("SOC_WAKE_ADDR 0%08x",
2651 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2652 				PCIE_SOC_WAKE_ADDRESS));
2653 
2654 	hif_info("RTC_STATE_ADDR 0x%08x",
2655 		hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
2656 							RTC_STATE_ADDRESS));
2657 
2658 	hif_info("wakeup target");
2659 
2660 	if (!cfg->enable_self_recovery)
2661 		QDF_BUG(0);
2662 
2663 	scn->recovery = true;
2664 
2665 	if (cbk->set_recovery_in_progress)
2666 		cbk->set_recovery_in_progress(cbk->context, true);
2667 
2668 	pld_is_pci_link_down(sc->dev);
2669 	return -EACCES;
2670 }
2671 
2672 /*
2673  * For now, we use simple on-demand sleep/wake.
2674  * Some possible improvements:
2675  *  -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
2676  *   (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
2677  *   Careful, though, these functions may be used by
2678  *  interrupt handlers ("atomic")
2679  *  -Don't use host_reg_table for this code; instead use values directly
2680  *  -Use a separate timer to track activity and allow Target to sleep only
2681  *   if it hasn't done anything for a while; may even want to delay some
2682  *   processing for a short while in order to "batch" (e.g.) transmit
2683  *   requests with completion processing into "windows of up time".  Costs
2684  *   some performance, but improves power utilization.
2685  *  -On some platforms, it might be possible to eliminate explicit
2686  *   sleep/wakeup. Instead, take a chance that each access works OK. If not,
2687  *   recover from the failure by forcing the Target awake.
2688  *  -Change keep_awake_count to an atomic_t in order to avoid spin lock
2689  *   overhead in some cases. Perhaps this makes more sense when
2690  *   CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
2691  *   disabled.
2692  *  -It is possible to compile this code out and simply force the Target
2693  *   to remain awake.  That would yield optimal performance at the cost of
2694  *   increased power. See CONFIG_ATH_PCIE_MAX_PERF.
2695  *
2696  * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
2697  */
2698 
2699 /**
2700  * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
2701  * @scn: hif_softc pointer.
2702  * @sleep_ok: when true, allow the Target to go back to sleep
2703  * @wait_for_it: when waking, wait until the Target is verified awake
2704  *
2705  * Allow the Target to sleep, or force it awake, on demand
2706  *
2707  * Return: 0 on success; -EACCES on failure
2708  */
2709 int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
2710 			      bool sleep_ok, bool wait_for_it)
2711 {
2712 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2713 	A_target_id_t pci_addr = scn->mem;
2714 	static int max_delay;
2715 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
2716 	static int debug;
2717 	if (scn->recovery)
2718 		return -EACCES;
2719 
2720 	if (qdf_atomic_read(&scn->link_suspended)) {
2721 		hif_err("Invalid access, PCIe link is down");
2722 		debug = true;
2723 		QDF_ASSERT(0);
2724 		return -EACCES;
2725 	}
2726 
2727 	if (debug) {
2728 		wait_for_it = true;
2729 		hif_err("Invalid access, PCIe link is suspended");
2730 		QDF_ASSERT(0);
2731 	}
2732 
2733 	if (sleep_ok) {
2734 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2735 		hif_state->keep_awake_count--;
2736 		if (hif_state->keep_awake_count == 0) {
2737 			/* Allow sleep */
2738 			hif_state->verified_awake = false;
2739 			hif_state->sleep_ticks = qdf_system_ticks();
2740 		}
2741 		if (hif_state->fake_sleep == false) {
2742 			/* Set the Fake Sleep */
2743 			hif_state->fake_sleep = true;
2744 
2745 			/* Start the Sleep Timer */
2746 			qdf_timer_stop(&hif_state->sleep_timer);
2747 			qdf_timer_start(&hif_state->sleep_timer,
2748 				HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
2749 		}
2750 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2751 	} else {
2752 		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
2753 
2754 		if (hif_state->fake_sleep) {
2755 			hif_state->verified_awake = true;
2756 		} else {
2757 			if (hif_state->keep_awake_count == 0) {
2758 				/* Force AWAKE */
2759 				hif_write32_mb(sc, pci_addr +
2760 					      PCIE_LOCAL_BASE_ADDRESS +
2761 					      PCIE_SOC_WAKE_ADDRESS,
2762 					      PCIE_SOC_WAKE_V_MASK);
2763 			}
2764 		}
2765 		hif_state->keep_awake_count++;
2766 		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
2767 
2768 		if (wait_for_it && !hif_state->verified_awake) {
2769 #define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
2770 			int tot_delay = 0;
2771 			int curr_delay = 5;
2772 
2773 			for (;; ) {
2774 				if (hif_targ_is_awake(scn, pci_addr)) {
2775 					hif_state->verified_awake = true;
2776 					break;
2777 				}
2778 				if (!hif_pci_targ_is_present(scn, pci_addr))
2779 					break;
2780 				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
2781 					return hif_log_soc_wakeup_timeout(sc);
2782 
2783 				OS_DELAY(curr_delay);
2784 				tot_delay += curr_delay;
2785 
2786 				if (curr_delay < 50)
2787 					curr_delay += 5;
2788 			}
2789 
2790 			/*
2791 			 * NB: If Target has to come out of Deep Sleep,
2792 			 * this may take a few msec. Typically, though,
2793 			 * this delay should be <30 us.
2794 			 */
2795 			if (tot_delay > max_delay)
2796 				max_delay = tot_delay;
2797 		}
2798 	}
2799 
2800 	if (debug && hif_state->verified_awake) {
2801 		debug = 0;
2802 		hif_err("INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
2803 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2804 				PCIE_INTR_ENABLE_ADDRESS),
2805 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2806 				PCIE_INTR_CAUSE_ADDRESS),
2807 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2808 				CPU_INTR_ADDRESS),
2809 			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
2810 				PCIE_INTR_CLR_ADDRESS),
2811 			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
2812 				CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
2813 	}
2814 
2815 	return 0;
2816 }
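/*
 * A hypothetical caller brackets register traffic with a wake/sleep
 * pair, waiting for the wake to take effect before touching the SoC:
 *
 *	if (hif_pci_target_sleep_state_adjust(scn, false, true) == 0) {
 *		val = hif_read32_mb(scn, scn->mem + offset);
 *		hif_pci_target_sleep_state_adjust(scn, true, false);
 *	}
 *
 * keep_awake_count is a refcount, so nested wake/sleep pairs are safe.
 */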
2817 
2818 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
2819 uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
2820 {
2821 	uint32_t value;
2822 	void *addr;
2823 
2824 	addr = scn->mem + offset;
2825 	value = hif_read32_mb(scn, addr);
2826 
2827 	{
2828 		unsigned long irq_flags;
2829 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2830 
2831 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2832 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2833 		pcie_access_log[idx].is_write = false;
2834 		pcie_access_log[idx].addr = addr;
2835 		pcie_access_log[idx].value = value;
2836 		pcie_access_log_seqnum++;
2837 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2838 	}
2839 
2840 	return value;
2841 }
2842 
2843 void
2844 hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
2845 {
2846 	void *addr;
2847 
2848 	addr = scn->mem + (offset);
2849 	hif_write32_mb(scn, addr, value);
2850 
2851 	{
2852 		unsigned long irq_flags;
2853 		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2854 
2855 		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2856 		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
2857 		pcie_access_log[idx].is_write = true;
2858 		pcie_access_log[idx].addr = addr;
2859 		pcie_access_log[idx].value = value;
2860 		pcie_access_log_seqnum++;
2861 		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2862 	}
2863 }
2864 
2865 /**
2866  * hif_target_dump_access_log() - dump access log
2867  *
2868  * dump access log
2869  *
2870  * Return: n/a
2871  */
2872 void hif_target_dump_access_log(void)
2873 {
2874 	int idx, len, start_idx, cur_idx;
2875 	unsigned long irq_flags;
2876 
2877 	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
2878 	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
2879 		len = PCIE_ACCESS_LOG_NUM;
2880 		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
2881 	} else {
2882 		len = pcie_access_log_seqnum;
2883 		start_idx = 0;
2884 	}
2885 
2886 	for (idx = 0; idx < len; idx++) {
2887 		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
2888 		hif_debug("idx:%d sn:%u wr:%d addr:%pK val:%u",
2889 		       idx,
2890 		       pcie_access_log[cur_idx].seqnum,
2891 		       pcie_access_log[cur_idx].is_write,
2892 		       pcie_access_log[cur_idx].addr,
2893 		       pcie_access_log[cur_idx].value);
2894 	}
2895 
2896 	pcie_access_log_seqnum = 0;
2897 	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
2898 }
2899 #endif
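/*
 * The access log above is a simple ring buffer: seqnum only increments,
 * and (seqnum % PCIE_ACCESS_LOG_NUM) selects the slot. Once more than
 * PCIE_ACCESS_LOG_NUM accesses have been recorded, the dump therefore
 * starts from the oldest surviving entry rather than from index 0.
 */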
2900 
2901 #ifndef HIF_AHB
2902 int hif_ahb_configure_irq(struct hif_pci_softc *sc)
2903 {
2904 	QDF_BUG(0);
2905 	return -EINVAL;
2906 }
2907 #endif
2908 
2909 static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
2910 {
2911 	struct ce_tasklet_entry *tasklet_entry = context;
2912 	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
2913 }
2914 extern const char *ce_name[];
2915 
2916 static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
2917 {
2918 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
2919 
2920 	return pci_scn->ce_irq_num[ce_id];
2921 }
2922 
2923 /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
2924  * @hif_sc: hif context
2925  * @ce_id: which ce to disable copy complete interrupts for
2926  *
2927  * Since MSI interrupts are not level based, the system can function
2928  * without disabling these interrupts.  Interrupt mitigation can be
2929  * added here for better system performance.
2930  */
2931 static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2932 {
2933 	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
2934 				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2935 }
2936 
2937 static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2938 {
2939 	if (__hif_check_link_status(hif_sc))
2940 		return;
2941 
2942 	pfrm_enable_irq(hif_sc->qdf_dev->dev,
2943 			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2944 }
2945 
2946 static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
2947 {
2948 	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2949 }
2950 
2951 static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
2952 {
2953 	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
2954 }
2955 
2956 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
2957 /**
2958  * hif_ce_configure_legacyirq() - Configure CE interrupts
2959  * @scn: hif_softc pointer
2960  *
2961  * Configure CE legacy interrupts
2962  *
2963  * Return: int
2964  */
2965 static int hif_ce_configure_legacyirq(struct hif_softc *scn)
2966 {
2967 	int ret = 0;
2968 	int irq, ce_id;
2969 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
2970 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
2971 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
2972 	int pci_slot;
2973 	qdf_device_t qdf_dev = scn->qdf_dev;
2974 
2975 	if (!pld_get_enable_intx(scn->qdf_dev->dev))
2976 		return -EINVAL;
2977 
2978 	scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
2979 	scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
2980 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
2981 
2982 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
2983 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
2984 			continue;
2985 
2986 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
2987 			continue;
2988 
2989 		ret = pfrm_get_irq(scn->qdf_dev->dev,
2990 				   (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
2991 				   legacy_ic_irqname[ce_id], ce_id, &irq);
2992 		if (ret) {
2993 			dev_err(scn->qdf_dev->dev, "get irq failed\n");
2994 			ret = -EFAULT;
2995 			goto skip;
2996 		}
2997 
2998 		pci_slot = hif_get_pci_slot(scn);
2999 		qdf_scnprintf(ce_irqname[pci_slot][ce_id],
3000 			      DP_IRQ_NAME_LEN, "pci%d_ce_%u", pci_slot, ce_id);
3001 		pci_sc->ce_irq_num[ce_id] = irq;
3002 
3003 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
3004 				       hif_ce_interrupt_handler,
3005 				       IRQF_SHARED,
3006 				       ce_irqname[pci_slot][ce_id],
3007 				       &ce_sc->tasklets[ce_id]);
3008 		if (ret) {
3009 			hif_err("error = %d", ret);
3010 			return -EINVAL;
3011 		}
3012 	}
3013 
3014 skip:
3015 	return ret;
3016 }
3017 #else
3018 /**
3019  * hif_ce_configure_legacyirq() - Configure CE interrupts
3020  * @scn: hif_softc pointer
3021  *
3022  * Configure CE legacy interrupts
3023  *
3024  * Return: int
3025  */
3026 static int hif_ce_configure_legacyirq(struct hif_softc *scn)
3027 {
3028 	return 0;
3029 }
3030 #endif
3031 
3032 int hif_ce_msi_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
3033 {
3034 	int ret = 0;
3035 	int irq;
3036 	uint32_t msi_data_start;
3037 	uint32_t msi_data_count;
3038 	unsigned int msi_data;
3039 	int irq_id;
3040 	uint32_t msi_irq_start;
3041 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3042 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3043 	int pci_slot;
3044 	unsigned long irq_flags;
3045 
3046 	if (ce_id >= CE_COUNT_MAX)
3047 		return -EINVAL;
3048 
3049 	/* do ce irq assignments */
3050 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3051 					  &msi_data_count, &msi_data_start,
3052 					  &msi_irq_start);
3053 
3054 	if (ret) {
3055 		hif_err("Failed to get CE msi config");
3056 		return -EINVAL;
3057 	}
3058 
3059 	irq_id = scn->int_assignment->msi_idx[ce_id];
3060 	/* needs to match the ce_id -> irq data mapping
3061 	 * used in the srng parameter configuration
3062 	 */
3063 	pci_slot = hif_get_pci_slot(scn);
3064 	msi_data = irq_id + msi_irq_start;
3065 	irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3066 	if (pld_is_one_msi(scn->qdf_dev->dev))
3067 		irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
3068 	else
3069 		irq_flags = IRQF_SHARED;
3070 	hif_debug("%s: (ce_id %d, irq_id %d, msi_data %d, irq %d flag 0x%lx tasklet %pK)",
3071 		  __func__, ce_id, irq_id, msi_data, irq, irq_flags,
3072 		  &ce_sc->tasklets[ce_id]);
3073 
3074 	/* implies the ce is also initialized */
3075 	if (!ce_sc->tasklets[ce_id].inited)
3076 		goto skip;
3077 
3078 	pci_sc->ce_irq_num[ce_id] = irq;
3079 
3080 	hif_affinity_mgr_init_ce_irq(scn, ce_id, irq);
3081 
3082 	qdf_scnprintf(ce_irqname[pci_slot][ce_id],
3083 		      DP_IRQ_NAME_LEN, "pci%u_wlan_ce_%u",
3084 		      pci_slot, ce_id);
3085 
3086 	ret = pfrm_request_irq(scn->qdf_dev->dev,
3087 			       irq, hif_ce_interrupt_handler, irq_flags,
3088 			       ce_irqname[pci_slot][ce_id],
3089 			       &ce_sc->tasklets[ce_id]);
3090 	if (ret)
3091 		return -EINVAL;
3092 
3093 skip:
3094 	return ret;
3095 }
3096 
3097 static int hif_ce_msi_configure_irq(struct hif_softc *scn)
3098 {
3099 	int ret;
3100 	int ce_id, irq;
3101 	uint32_t msi_data_start;
3102 	uint32_t msi_data_count;
3103 	uint32_t msi_irq_start;
3104 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3105 	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
3106 
3107 	if (!scn->ini_cfg.disable_wake_irq) {
3108 		/* do wake irq assignment */
3109 		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
3110 						  &msi_data_count,
3111 						  &msi_data_start,
3112 						  &msi_irq_start);
3113 		if (ret)
3114 			return ret;
3115 
3116 		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
3117 						msi_irq_start);
3118 		scn->wake_irq_type = HIF_PM_MSI_WAKE;
3119 
3120 		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
3121 				       hif_wake_interrupt_handler,
3122 				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
3123 
3124 		if (ret)
3125 			return ret;
3126 	}
3127 
3128 	/* do ce irq assignments */
3129 	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
3130 					  &msi_data_count, &msi_data_start,
3131 					  &msi_irq_start);
3132 	if (ret)
3133 		goto free_wake_irq;
3134 
3135 	if (ce_srng_based(scn)) {
3136 		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
3137 		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
3138 	} else {
3139 		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
3140 		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
3141 	}
3142 
3143 	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
3144 
3145 	/* needs to match the ce_id -> irq data mapping
3146 	 * used in the srng parameter configuration
3147 	 */
3148 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3149 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3150 			continue;
3151 
3152 		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
3153 			continue;
3154 
3155 		ret = hif_ce_msi_configure_irq_by_ceid(scn, ce_id);
3156 		if (ret)
3157 			goto free_irq;
3158 	}
3159 
3160 	return ret;
3161 
3162 free_irq:
3163 	/* the request_irq for the last ce_id failed so skip it. */
3164 	while (ce_id > 0 && ce_id < scn->ce_count) {
3165 		unsigned int msi_data;
3166 
3167 		ce_id--;
3168 		msi_data = (ce_id % msi_data_count) + msi_irq_start;
3169 		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
3170 		pfrm_free_irq(scn->qdf_dev->dev,
3171 			      irq, &ce_sc->tasklets[ce_id]);
3172 	}
3173 
3174 free_wake_irq:
3175 	if (!scn->ini_cfg.disable_wake_irq) {
3176 		pfrm_free_irq(scn->qdf_dev->dev,
3177 			      scn->wake_irq, scn);
3178 		scn->wake_irq = 0;
3179 		scn->wake_irq_type = HIF_PM_INVALID_WAKE;
3180 	}
3181 
3182 	return ret;
3183 }
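/*
 * The free_irq unwind above recomputes each irq rather than reading it
 * back from ce_irq_num[], since only successfully requested CEs cached
 * their irq. With, say, msi_data_count = 8 and msi_irq_start = 1, CE 10
 * maps to msi_data = (10 % 8) + 1 = 3, i.e. CEs share MSI vectors once
 * ce_count exceeds the number of CE MSIs granted by the platform.
 */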
3184 
3185 static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
3186 {
3187 	int i;
3188 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3189 
3190 	for (i = 0; i < hif_ext_group->numirq; i++)
3191 		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
3192 					hif_ext_group->os_irq[i]);
3193 }
3194 
3195 static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
3196 {
3197 	int i;
3198 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
3199 
3200 	for (i = 0; i < hif_ext_group->numirq; i++)
3201 		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
3202 }
3203 
3204 /**
3205  * hif_pci_get_irq_name() - get irqname
3206  * This function gives irqnumber to irqname
3207  * mapping.
3208  *
3209  * @irq_no: irq number
3210  *
3211  * Return: irq name
3212  */
3213 const char *hif_pci_get_irq_name(int irq_no)
3214 {
3215 	return "pci-dummy";
3216 }
3217 
3218 #if defined(FEATURE_IRQ_AFFINITY) || defined(HIF_CPU_PERF_AFFINE_MASK)
3219 void hif_pci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
3220 				   bool perf)
3221 {
3222 	int i, ret;
3223 	unsigned int cpus;
3224 	bool mask_set = false;
3225 	int package_id;
3226 	int cpu_cluster = perf ? hif_get_perf_cluster_bitmap() :
3227 				 BIT(CPU_CLUSTER_TYPE_LITTLE);
3228 
3229 	for (i = 0; i < hif_ext_group->numirq; i++)
3230 		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
3231 
3232 	for (i = 0; i < hif_ext_group->numirq; i++) {
3233 		qdf_for_each_online_cpu(cpus) {
3234 			package_id = qdf_topology_physical_package_id(cpus);
3235 			if (package_id >= 0 && BIT(package_id) & cpu_cluster) {
3236 				qdf_cpumask_set_cpu(cpus,
3237 						    &hif_ext_group->
3238 						    new_cpu_mask[i]);
3239 				mask_set = true;
3240 			}
3241 		}
3242 	}
3243 	for (i = 0; i < hif_ext_group->numirq; i++) {
3244 		if (mask_set) {
3245 			ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
3246 								    hif_ext_group->os_irq[i],
3247 								    hif_ext_group->grp_id, i,
3248 								    &hif_ext_group->new_cpu_mask[i]);
3249 			if (ret)
3250 				qdf_debug("Set affinity %*pbl fails for IRQ %d ",
3251 					  qdf_cpumask_pr_args(&hif_ext_group->
3252 							      new_cpu_mask[i]),
3253 					  hif_ext_group->os_irq[i]);
3254 		} else {
3255 			qdf_debug("Offline CPU: Set affinity fails for IRQ: %d",
3256 				  hif_ext_group->os_irq[i]);
3257 		}
3258 	}
3259 }
3260 #endif
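/*
 * Example of the cluster selection above, assuming a big.LITTLE layout
 * where little cores report physical package 0 and big cores package 1:
 * perf == true yields a cpu_cluster bitmap with BIT(1) set, so only the
 * big cores pass the BIT(package_id) & cpu_cluster test and end up in
 * new_cpu_mask.
 */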
3261 
3262 #ifdef HIF_CPU_PERF_AFFINE_MASK
3263 void hif_pci_ce_irq_set_affinity_hint(struct hif_softc *scn)
3264 {
3265 	int ret;
3266 	unsigned int cpus;
3267 	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
3268 	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
3269 	struct CE_attr *host_ce_conf;
3270 	int ce_id;
3271 	qdf_cpu_mask ce_cpu_mask, updated_mask;
3272 	int perf_cpu_cluster = hif_get_perf_cluster_bitmap();
3273 	int package_id;
3274 
3275 	host_ce_conf = ce_sc->host_ce_config;
3276 	qdf_cpumask_clear(&ce_cpu_mask);
3277 
3278 	qdf_for_each_online_cpu(cpus) {
3279 		package_id = qdf_topology_physical_package_id(cpus);
3280 		if (package_id >= 0 && BIT(package_id) & perf_cpu_cluster) {
3281 			qdf_cpumask_set_cpu(cpus,
3282 					    &ce_cpu_mask);
3283 		} else {
3284 			hif_err_rl("Unable to set cpu mask for offline CPU %d"
3285 				   , cpus);
3286 		}
3287 	}
3288 	if (qdf_cpumask_empty(&ce_cpu_mask)) {
3289 		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
3290 		return;
3291 	}
3292 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3293 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3294 			continue;
3295 		qdf_cpumask_copy(&updated_mask, &ce_cpu_mask);
3296 		ret = hif_affinity_mgr_set_ce_irq_affinity(scn, pci_sc->ce_irq_num[ce_id],
3297 							   ce_id,
3298 							   &updated_mask);
3299 		qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]);
3300 		qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id],
3301 				 &updated_mask);
3302 		if (ret)
3303 			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
3304 				   qdf_cpumask_pr_args(
3305 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3306 				   pci_sc->ce_irq_num[ce_id]);
3307 		else
3308 			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
3309 				     qdf_cpumask_pr_args(
3310 					&pci_sc->ce_irq_cpu_mask[ce_id]),
3311 				     pci_sc->ce_irq_num[ce_id]);
3312 	}
3313 }
3314 #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
3315 
3316 #ifdef HIF_CPU_CLEAR_AFFINITY
3317 void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
3318 					   int intr_ctxt_id, int cpu)
3319 {
3320 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3321 	struct hif_exec_context *hif_ext_group;
3322 	int i, ret;
3323 
3324 	if (intr_ctxt_id < hif_state->hif_num_extgroup) {
3325 		hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
3326 
3327 		for (i = 0; i < hif_ext_group->numirq; i++) {
3328 			qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
3329 			qdf_cpumask_clear_cpu(cpu,
3330 					      &hif_ext_group->new_cpu_mask[i]);
3331 			ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
3332 								    hif_ext_group->os_irq[i],
3333 								    hif_ext_group->grp_id, i,
3334 								    &hif_ext_group->new_cpu_mask[i]);
3335 			if (ret)
3336 				hif_err("Set affinity %*pbl fails for IRQ %d ",
3337 					qdf_cpumask_pr_args(&hif_ext_group->
3338 							    new_cpu_mask[i]),
3339 					hif_ext_group->os_irq[i]);
3340 			else
3341 				hif_debug("Set affinity %*pbl for IRQ: %d",
3342 					  qdf_cpumask_pr_args(&hif_ext_group->
3343 							      new_cpu_mask[i]),
3344 					  hif_ext_group->os_irq[i]);
3345 		}
3346 	}
3347 }
3348 #endif
3349 
3350 void hif_pci_config_irq_affinity(struct hif_softc *scn)
3351 {
3352 	int i;
3353 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3354 	struct hif_exec_context *hif_ext_group;
3355 
3356 	hif_core_ctl_set_boost(true);
3357 	/* Set IRQ affinity for WLAN DP interrupts */
3358 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3359 		hif_ext_group = hif_state->hif_ext_group[i];
3360 		hif_pci_irq_set_affinity_hint(hif_ext_group, true);
3361 	}
3362 	/* Set IRQ affinity for CE interrupts */
3363 	hif_pci_ce_irq_set_affinity_hint(scn);
3364 }
3365 
3366 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
3367 /**
3368  * hif_grp_configure_legacyirq() - Configure DP interrupts
3369  * @scn: hif_softc pointer
3370  * @hif_ext_group: hif extended group pointer
3371  *
3372  * Configure DP legacy interrupts
3373  *
3374  * Return: int
3375  */
3376 static int hif_grp_configure_legacyirq(struct hif_softc *scn,
3377 				       struct hif_exec_context *hif_ext_group)
3378 {
3379 	int ret = 0;
3380 	int irq = 0;
3381 	int j;
3382 	int pci_slot;
3383 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3384 	struct pci_dev *pdev = sc->pdev;
3385 	qdf_device_t qdf_dev = scn->qdf_dev;
3386 
3387 	for (j = 0; j < hif_ext_group->numirq; j++) {
3388 		ret = pfrm_get_irq(&pdev->dev,
3389 				   (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
3390 				   legacy_ic_irqname[hif_ext_group->irq[j]],
3391 				   hif_ext_group->irq[j], &irq);
3392 		if (ret) {
3393 			dev_err(&pdev->dev, "get irq failed\n");
3394 			return -EFAULT;
3395 		}
3396 		hif_ext_group->os_irq[j] = irq;
3397 	}
3398 
3399 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3400 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3401 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3402 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3403 
3404 	pci_slot = hif_get_pci_slot(scn);
3405 	for (j = 0; j < hif_ext_group->numirq; j++) {
3406 		irq = hif_ext_group->os_irq[j];
3407 		if (scn->irq_unlazy_disable)
3408 			qdf_dev_set_irq_status_flags(irq,
3409 						     QDF_IRQ_DISABLE_UNLAZY);
3410 
3411 		hif_debug("request_irq = %d for grp %d",
3412 			  irq, hif_ext_group->grp_id);
3413 
3414 		qdf_scnprintf(dp_legacy_irqname[pci_slot][hif_ext_group->irq[j]],
3415 			      DP_IRQ_NAME_LEN, "pci%u_%s", pci_slot,
3416 			      legacy_ic_irqname[hif_ext_group->irq[j]]);
3417 
3418 		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
3419 				       hif_ext_group_interrupt_handler,
3420 				       IRQF_SHARED | IRQF_NO_SUSPEND,
3421 				       dp_legacy_irqname[pci_slot][hif_ext_group->irq[j]],
3422 				       hif_ext_group);
3423 		if (ret) {
3424 			hif_err("request_irq failed ret = %d", ret);
3425 			return -EFAULT;
3426 		}
3427 		hif_ext_group->os_irq[j] = irq;
3428 	}
3429 	hif_ext_group->irq_requested = true;
3430 	return 0;
3431 }
3432 #else
3433 /**
3434  * hif_grp_configure_legacyirq() - Configure DP interrupts
3435  * @scn: hif_softc pointer
3436  * @hif_ext_group: hif extended group pointer
3437  *
3438  * Configure DP legacy interrupts
3439  *
3440  * Return: int
3441  */
3442 static int hif_grp_configure_legacyirq(struct hif_softc *scn,
3443 				       struct hif_exec_context *hif_ext_group)
3444 {
3445 	return 0;
3446 }
3447 #endif
3448 
3449 int hif_pci_configure_grp_irq(struct hif_softc *scn,
3450 			      struct hif_exec_context *hif_ext_group)
3451 {
3452 	int ret = 0;
3453 	int irq = 0;
3454 	int j;
3455 	int pci_slot;
3456 	unsigned long irq_flags;
3457 
3458 	if (pld_get_enable_intx(scn->qdf_dev->dev))
3459 		return hif_grp_configure_legacyirq(scn, hif_ext_group);
3460 
3461 	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
3462 	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
3463 	hif_ext_group->irq_name = &hif_pci_get_irq_name;
3464 	hif_ext_group->work_complete = &hif_dummy_grp_done;
3465 
3466 	pci_slot = hif_get_pci_slot(scn);
3467 	for (j = 0; j < hif_ext_group->numirq; j++) {
3468 		irq = hif_ext_group->irq[j];
3469 		if (scn->irq_unlazy_disable)
3470 			qdf_dev_set_irq_status_flags(irq,
3471 						     QDF_IRQ_DISABLE_UNLAZY);
3472 
3473 		if (pld_is_one_msi(scn->qdf_dev->dev))
3474 			irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
3475 		else
3476 			irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
3477 		hif_debug("request_irq = %d for grp %d irq_flags 0x%lx",
3478 			  irq, hif_ext_group->grp_id, irq_flags);
3479 
3480 		qdf_scnprintf(dp_irqname[pci_slot][hif_ext_group->grp_id],
3481 			      DP_IRQ_NAME_LEN, "pci%u_wlan_grp_dp_%u",
3482 			      pci_slot, hif_ext_group->grp_id);
3483 		ret = pfrm_request_irq(
3484 				scn->qdf_dev->dev, irq,
3485 				hif_ext_group_interrupt_handler,
3486 				irq_flags,
3487 				dp_irqname[pci_slot][hif_ext_group->grp_id],
3488 				hif_ext_group);
3489 		if (ret) {
3490 			hif_err("request_irq failed ret = %d", ret);
3491 			return -EFAULT;
3492 		}
3493 		hif_ext_group->os_irq[j] = irq;
3494 		hif_affinity_mgr_init_grp_irq(scn, hif_ext_group->grp_id,
3495 					      j, irq);
3496 	}
3497 	hif_ext_group->irq_requested = true;
3498 	return 0;
3499 }
3500 
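/*
 * Editor's note on the flag choice above (inferred from the code, not
 * authoritative): with a single shared MSI vector, IRQF_NOBALANCING
 * pins the vector so hif_affinity_mgr_init_grp_irq() stays in charge of
 * placement; with per-group vectors, IRQF_NO_SUSPEND keeps DP interrupts
 * armed across suspend. Equivalent selection in one expression:
 *
 *	irq_flags = IRQF_SHARED | (pld_is_one_msi(scn->qdf_dev->dev) ?
 *			IRQF_NOBALANCING : IRQF_NO_SUSPEND);
 */
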
3501 #ifdef FEATURE_IRQ_AFFINITY
3502 void hif_pci_set_grp_intr_affinity(struct hif_softc *scn,
3503 				   uint32_t grp_intr_bitmask, bool perf)
3504 {
3505 	int i;
3506 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
3507 	struct hif_exec_context *hif_ext_group;
3508 
3509 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3510 		if (!(grp_intr_bitmask & BIT(i)))
3511 			continue;
3512 
3513 		hif_ext_group = hif_state->hif_ext_group[i];
3514 		hif_pci_irq_set_affinity_hint(hif_ext_group, perf);
3515 		qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
3516 	}
3517 }
3518 #endif
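
/*
 * Illustrative use (editor's sketch): callers select NAPI groups by bit
 * position; e.g. moving groups 0 and 2 to performance CPUs:
 *
 *	hif_pci_set_grp_intr_affinity(scn, BIT(0) | BIT(2), true);
 */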
3519 
3520 #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
3521 	defined(QCA_WIFI_KIWI))
3522 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3523 			    uint32_t offset)
3524 {
3525 	return hal_read32_mb(hif_sc->hal_soc, offset);
3526 }
3527 
3528 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3529 			 uint32_t offset,
3530 			 uint32_t value)
3531 {
3532 	hal_write32_mb(hif_sc->hal_soc, offset, value);
3533 }
3534 #else
3535 /* TODO: Need to implement other chips carefully */
3536 uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
3537 			    uint32_t offset)
3538 {
3539 	return 0;
3540 }
3541 
3542 void hif_pci_reg_write32(struct hif_softc *hif_sc,
3543 			 uint32_t offset,
3544 			 uint32_t value)
3545 {
3546 }
3547 #endif
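
/*
 * Illustrative read-modify-write through the accessors above (editor's
 * sketch; HYP_REG_OFFSET is a hypothetical register offset):
 *
 *	uint32_t val = hif_pci_reg_read32(hif_sc, HYP_REG_OFFSET);
 *
 *	hif_pci_reg_write32(hif_sc, HYP_REG_OFFSET, val | BIT(0));
 */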
3548 
3549 /**
3550  * hif_configure_irq() - configure interrupt
3551  * @scn: HIF context
3552  *
3553  * This function configures interrupt(s)
3554  *
3555  * Return: 0 on success, negative errno on failure
3556  */
3557 int hif_configure_irq(struct hif_softc *scn)
3558 {
3559 	int ret = 0;
3560 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3561 
3562 	hif_info("E");
3563 
3564 	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
3565 		scn->request_irq_done = false;
3566 		return 0;
3567 	}
3568 
3569 	hif_init_reschedule_tasklet_work(sc);
3570 
3571 	ret = hif_ce_msi_configure_irq(scn);
3572 	if (ret == 0) {
3573 		goto end;
3574 	}
3575 
3576 	switch (scn->target_info.target_type) {
3577 	case TARGET_TYPE_QCA8074:
3578 	case TARGET_TYPE_QCA8074V2:
3579 	case TARGET_TYPE_QCA6018:
3580 	case TARGET_TYPE_QCA5018:
3581 	case TARGET_TYPE_QCA5332:
3582 	case TARGET_TYPE_QCA9574:
3583 	case TARGET_TYPE_QCN9160:
3584 		ret = hif_ahb_configure_irq(sc);
3585 		break;
3586 	case TARGET_TYPE_QCN9224:
3587 		ret = hif_ce_configure_legacyirq(scn);
3588 		break;
3589 	default:
3590 		ret = hif_pci_configure_legacy_irq(sc);
3591 		break;
3592 	}
3593 	if (ret < 0) {
3594 		hif_err("error = %d", ret);
3595 		return ret;
3596 	}
3597 end:
3598 	scn->request_irq_done = true;
3599 	return 0;
3600 }
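
/*
 * Editor's note: the ordering above tries MSI first and only falls back
 * to target-specific legacy wiring when MSI setup fails. A bus-layer
 * caller only needs the combined result (sketch; err_irq is a
 * hypothetical error label):
 *
 *	if (hif_configure_irq(scn) < 0)
 *		goto err_irq;
 */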
3601 
3602 /**
3603  * hif_trigger_timer_irq() - Triggers interrupt on LF_Timer 0
3604  * @scn: hif control structure
3605  *
3606  * Sets the IRQ bit in the LF Timer Status Address to wake a
3607  * Peregrine/Swift target stuck in a polling loop in pcie_address_config in FW
3608  *
3609  * Return: none
3610  */
3611 static void hif_trigger_timer_irq(struct hif_softc *scn)
3612 {
3613 	int tmp;
3614 	/* Trigger IRQ on Peregrine/Swift by setting
3615 	 * IRQ Bit of LF_TIMER 0
3616 	 */
3617 	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3618 						SOC_LF_TIMER_STATUS0_ADDRESS));
3619 	/* Set Raw IRQ Bit */
3620 	tmp |= 1;
3621 	/* SOC_LF_TIMER_STATUS0 */
3622 	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
3623 		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
3624 }
3625 
3626 /**
3627  * hif_target_sync() - ensure the target is ready
3628  * @scn: hif control structure
3629  *
3630  * Informs fw that we plan to use legacy interrupts so that
3631  * it can begin booting. Ensures that the fw finishes booting
3632  * before continuing. Should be called before trying to write
3633  * to the target's other registers for the first time.
3634  *
3635  * Return: none
3636  */
3637 static void hif_target_sync(struct hif_softc *scn)
3638 {
3639 	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3640 			    PCIE_INTR_ENABLE_ADDRESS),
3641 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3642 	/* read to flush pcie write */
3643 	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3644 			PCIE_INTR_ENABLE_ADDRESS));
3645 
3646 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3647 			PCIE_SOC_WAKE_ADDRESS,
3648 			PCIE_SOC_WAKE_V_MASK);
3649 	while (!hif_targ_is_awake(scn, scn->mem))
3650 		;
3651 
3652 	if (HAS_FW_INDICATOR) {
3653 		int wait_limit = 500;
3654 		int fw_ind = 0;
3655 		int retry_count = 0;
3656 		uint32_t target_type = scn->target_info.target_type;
3657 fw_retry:
3658 		hif_info("Loop checking FW signal");
3659 		while (1) {
3660 			fw_ind = hif_read32_mb(scn, scn->mem +
3661 					FW_INDICATOR_ADDRESS);
3662 			if (fw_ind & FW_IND_INITIALIZED)
3663 				break;
3664 			if (wait_limit-- < 0)
3665 				break;
3666 			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
3667 			    PCIE_INTR_ENABLE_ADDRESS),
3668 			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
3669 			    /* read to flush pcie write */
3670 			(void)hif_read32_mb(scn, scn->mem +
3671 			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
3672 
3673 			qdf_mdelay(10);
3674 		}
3675 		if (wait_limit < 0) {
3676 			if (target_type == TARGET_TYPE_AR9888 &&
3677 			    retry_count++ < 2) {
3678 				hif_trigger_timer_irq(scn);
3679 				wait_limit = 500;
3680 				goto fw_retry;
3681 			}
3682 			hif_info("FW signal timed out");
3683 			qdf_assert_always(0);
3684 		} else {
3685 			hif_info("Got FW signal, retries = %x", 500-wait_limit);
3686 		}
3687 	}
3688 	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
3689 			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
3690 }
3691 
3692 static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
3693 				     struct device *dev)
3694 {
3695 	struct pld_soc_info info;
3696 	struct hif_softc *scn = HIF_GET_SOFTC(sc);
3697 
3698 	pld_get_soc_info(dev, &info);
3699 	sc->mem = info.v_addr;
3700 	sc->ce_sc.ol_sc.mem    = info.v_addr;
3701 	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
3702 	sc->device_version.family_number = info.device_version.family_number;
3703 	sc->device_version.device_number = info.device_version.device_number;
3704 	sc->device_version.major_version = info.device_version.major_version;
3705 	sc->device_version.minor_version = info.device_version.minor_version;
3706 
3707 	hif_info("%s: fam num %u dev ver %u maj ver %u min ver %u", __func__,
3708 		 sc->device_version.family_number,
3709 		 sc->device_version.device_number,
3710 		 sc->device_version.major_version,
3711 		 sc->device_version.minor_version);
3712 
3713 	/* dev_mem_info[0] is for CMEM */
3714 	scn->cmem_start = info.dev_mem_info[0].start;
3715 	scn->cmem_size = info.dev_mem_info[0].size;
3716 	scn->target_info.target_version = info.soc_id;
3717 	scn->target_info.target_revision = 0;
3718 	scn->target_info.soc_version = info.device_version.major_version;
3719 }
3720 
3721 static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
3722 				       struct device *dev)
3723 {}
3724 
3725 static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
3726 				    int device_id)
3727 {
3728 	if (!pld_have_platform_driver_support(sc->dev))
3729 		return false;
3730 
3731 	switch (device_id) {
3732 	case QCA6290_DEVICE_ID:
3733 	case QCN9000_DEVICE_ID:
3734 	case QCN9224_DEVICE_ID:
3735 	case QCA6290_EMULATION_DEVICE_ID:
3736 	case QCA6390_DEVICE_ID:
3737 	case QCA6490_DEVICE_ID:
3738 	case AR6320_DEVICE_ID:
3739 	case QCN7605_DEVICE_ID:
3740 	case KIWI_DEVICE_ID:
3741 	case MANGO_DEVICE_ID:
3742 	case PEACH_DEVICE_ID:
3743 		return true;
3744 	}
3745 	return false;
3746 }
3747 
3748 static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
3749 					   int device_id)
3750 {
3751 	if (hif_is_pld_based_target(sc, device_id)) {
3752 		sc->hif_enable_pci = hif_enable_pci_pld;
3753 		sc->hif_pci_deinit = hif_pci_deinit_pld;
3754 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
3755 	} else {
3756 		sc->hif_enable_pci = hif_enable_pci_nopld;
3757 		sc->hif_pci_deinit = hif_pci_deinit_nopld;
3758 		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
3759 	}
3760 }
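
/*
 * Editor's note: the assignments above form a small strategy table, so
 * later code can stay agnostic of PLD support. The dispatch mirrors
 * what hif_pci_enable_bus() below actually does:
 *
 *	hif_pci_init_deinit_ops_attach(sc, id->device);
 *	sc->hif_pci_get_soc_info(sc, dev);
 *	ret = sc->hif_enable_pci(sc, pdev, id);
 */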
3761 
3762 #ifdef HIF_REG_WINDOW_SUPPORT
3763 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3764 					       u32 target_type)
3765 {
3766 	switch (target_type) {
3767 	case TARGET_TYPE_QCN7605:
3768 	case TARGET_TYPE_QCA6490:
3769 	case TARGET_TYPE_QCA6390:
3770 	case TARGET_TYPE_KIWI:
3771 	case TARGET_TYPE_MANGO:
3772 	case TARGET_TYPE_PEACH:
3773 		sc->use_register_windowing = true;
3774 		qdf_spinlock_create(&sc->register_access_lock);
3775 		sc->register_window = 0;
3776 		break;
3777 	default:
3778 		sc->use_register_windowing = false;
3779 	}
3780 }
3781 #else
3782 static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
3783 					       u32 target_type)
3784 {
3785 	sc->use_register_windowing = false;
3786 }
3787 #endif
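
/*
 * Editor's note (hedged): for the targets listed above only a window of
 * the register space is visible at a time, so accesses are expected to
 * hold register_access_lock while register_window selects the region,
 * roughly:
 *
 *	qdf_spin_lock_irqsave(&sc->register_access_lock);
 *	... select window for the offset, then issue the read/write ...
 *	qdf_spin_unlock_irqrestore(&sc->register_access_lock);
 */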
3788 
3789 /**
3790  * hif_pci_enable_bus(): enable bus
3791  * @ol_sc: soft_sc struct
3792  * @dev: device pointer
3793  * @bdev: bus dev pointer
3794  * @bid: bus id pointer
3795  * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
3796  *
3797  * This function enables the bus
3798  *
3799  * Return: QDF_STATUS
3800  */
3801 QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
3802 			  struct device *dev, void *bdev,
3803 			  const struct hif_bus_id *bid,
3804 			  enum hif_enable_type type)
3805 {
3806 	int ret = 0;
3807 	uint32_t hif_type;
3808 	uint32_t target_type = TARGET_TYPE_UNKNOWN;
3809 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
3810 	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
3811 	uint16_t revision_id = 0;
3812 	int probe_again = 0;
3813 	struct pci_dev *pdev = bdev;
3814 	const struct pci_device_id *id = (const struct pci_device_id *)bid;
3815 	struct hif_target_info *tgt_info;
3816 
3817 	if (!ol_sc) {
3818 		hif_err("hif_ctx is NULL");
3819 		return QDF_STATUS_E_NOMEM;
3820 	}
3821 	/* Following print is used by various tools to identify
3822 	 * WLAN SOC (e.g. crash dump analysis and reporting tool).
3823 	 */
3824 	hif_info("con_mode = 0x%x, WLAN_SOC_device_id = 0x%x",
3825 		 hif_get_conparam(ol_sc), id->device);
3826 
3827 	sc->pdev = pdev;
3828 	sc->dev = &pdev->dev;
3829 	sc->devid = id->device;
3830 	sc->cacheline_sz = dma_get_cache_alignment();
3831 	tgt_info = hif_get_target_info_handle(hif_hdl);
3832 	hif_pci_init_deinit_ops_attach(sc, id->device);
3833 	sc->hif_pci_get_soc_info(sc, dev);
3834 again:
3835 	ret = sc->hif_enable_pci(sc, pdev, id);
3836 	if (ret < 0) {
3837 		hif_err("hif_enable_pci error = %d", ret);
3838 		goto err_enable_pci;
3839 	}
3840 	hif_info("hif_enable_pci done");
3841 
3842 	/* Temporary FIX: disable ASPM on peregrine.
3843 	 * Will be removed after the OTP is programmed
3844 	 */
3845 	hif_disable_power_gating(hif_hdl);
3846 
3847 	device_disable_async_suspend(&pdev->dev);
3848 	pfrm_read_config_word(pdev, 0x08, &revision_id);
3849 
3850 	ret = hif_get_device_type(id->device, revision_id,
3851 						&hif_type, &target_type);
3852 	if (ret < 0) {
3853 		hif_err("Invalid device id/revision_id");
3854 		goto err_tgtstate;
3855 	}
3856 	hif_info("hif_type = 0x%x, target_type = 0x%x",
3857 		hif_type, target_type);
3858 
3859 	hif_register_tbl_attach(ol_sc, hif_type);
3860 	hif_target_register_tbl_attach(ol_sc, target_type);
3861 
3862 	hif_pci_init_reg_windowing_support(sc, target_type);
3863 
3864 	tgt_info->target_type = target_type;
3865 
3866 	/*
3867 	 * Disable unlazy interrupt registration for QCN9000/QCN9224
3868 	 */
3869 	if (target_type == TARGET_TYPE_QCN9000 ||
3870 	    target_type == TARGET_TYPE_QCN9224)
3871 		ol_sc->irq_unlazy_disable = 1;
3872 
3873 	if (ce_srng_based(ol_sc)) {
3874 		hif_info("Skip tgt_wake up for srng devices");
3875 	} else {
3876 		ret = hif_pci_probe_tgt_wakeup(sc);
3877 		if (ret < 0) {
3878 			hif_err("hif_pci_prob_wakeup error = %d", ret);
3879 			if (ret == -EAGAIN)
3880 				probe_again++;
3881 			goto err_tgtstate;
3882 		}
3883 		hif_info("hif_pci_probe_tgt_wakeup done");
3884 	}
3885 
3886 	if (!ol_sc->mem_pa) {
3887 		hif_err("BAR0 uninitialized");
3888 		ret = -EIO;
3889 		goto err_tgtstate;
3890 	}
3891 
3892 	if (!ce_srng_based(ol_sc)) {
3893 		hif_target_sync(ol_sc);
3894 
3895 		if (hif_pci_default_link_up(tgt_info))
3896 			hif_vote_link_up(hif_hdl);
3897 	}
3898 
3899 	return QDF_STATUS_SUCCESS;
3900 
3901 err_tgtstate:
3902 	hif_disable_pci(sc);
3903 	sc->pci_enabled = false;
3904 	hif_err("hif_disable_pci done");
3905 	return QDF_STATUS_E_ABORTED;
3906 
3907 err_enable_pci:
3908 	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
3909 		int delay_time;
3910 
3911 		hif_info("pci reprobe");
3912 		/* at least 100 ms, then quadratic: 100, 100, 100, 160, 250, ... */
3913 		delay_time = max(100, 10 * (probe_again * probe_again));
3914 		qdf_mdelay(delay_time);
3915 		goto again;
3916 	}
3917 	return qdf_status_from_os_return(ret);
3918 }
3919 
3920 /**
3921  * hif_pci_irq_enable() - ce_irq_enable
3922  * @scn: hif_softc
3923  * @ce_id: ce_id
3924  *
3925  * Return: void
3926  */
3927 void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
3928 {
3929 	uint32_t tmp = 1 << ce_id;
3930 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3931 
3932 	qdf_spin_lock_irqsave(&sc->irq_lock);
3933 	scn->ce_irq_summary &= ~tmp;
3934 	if (scn->ce_irq_summary == 0) {
3935 		/* Enable Legacy PCI line interrupts */
3936 		if (LEGACY_INTERRUPTS(sc) &&
3937 			(scn->target_status != TARGET_STATUS_RESET) &&
3938 			(!qdf_atomic_read(&scn->link_suspended))) {
3939 
3940 			hif_write32_mb(scn, scn->mem +
3941 				(SOC_CORE_BASE_ADDRESS |
3942 				PCIE_INTR_ENABLE_ADDRESS),
3943 				HOST_GROUP0_MASK);
3944 
3945 			hif_read32_mb(scn, scn->mem +
3946 					(SOC_CORE_BASE_ADDRESS |
3947 					PCIE_INTR_ENABLE_ADDRESS));
3948 		}
3949 	}
3950 	if (scn->hif_init_done)
3951 		Q_TARGET_ACCESS_END(scn);
3952 	qdf_spin_unlock_irqrestore(&sc->irq_lock);
3953 
3954 	/* check for missed firmware crash */
3955 	hif_fw_interrupt_handler(0, scn);
3956 }
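
/*
 * Editor's note: ce_irq_summary holds one disable vote per copy engine;
 * the enable path above clears this CE's bit and re-arms the legacy PCI
 * line only once every vote is gone, e.g. for CE 3:
 *
 *	scn->ce_irq_summary &= ~(1 << 3);
 */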
3957 
3958 /**
3959  * hif_pci_irq_disable() - ce_irq_disable
3960  * @scn: hif_softc
3961  * @ce_id: ce_id
3962  *
3963  * only applicable to legacy copy engine...
3964  *
3965  * Return: void
3966  */
3967 void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
3968 {
3969 	/* For Rome only need to wake up target */
3970 	/* target access is maintained until interrupts are re-enabled */
3971 	Q_TARGET_ACCESS_BEGIN(scn);
3972 }
3973 
3974 int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
3975 {
3976 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
3977 
3978 	/* legacy case only has one irq */
3979 	return pci_scn->irq;
3980 }
3981 
3982 int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
3983 {
3984 	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
3985 	struct hif_target_info *tgt_info;
3986 
3987 	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
3988 
3989 	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
3990 	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
3991 	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
3992 	    tgt_info->target_type == TARGET_TYPE_QCN7605 ||
3993 	    tgt_info->target_type == TARGET_TYPE_QCA8074 ||
3994 	    tgt_info->target_type == TARGET_TYPE_KIWI ||
3995 	    tgt_info->target_type == TARGET_TYPE_MANGO ||
3996 	    tgt_info->target_type == TARGET_TYPE_PEACH) {
3997 		/*
3998 		 * Need to consider offset's memtype for QCA6290/QCA8074,
3999 		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
4000 		 * well initialized/defined.
4001 		 */
4002 		return 0;
4003 	}
4004 
4005 	if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
4006 		 || (offset + sizeof(unsigned int) <= sc->mem_len)) {
4007 		return 0;
4008 	}
4009 
4010 	hif_info("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)",
4011 		offset, (uint32_t)(offset + sizeof(unsigned int)),
4012 		sc->mem_len);
4013 
4014 	return -EINVAL;
4015 }
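
/*
 * Illustrative caller (editor's sketch): validate the offset before a
 * diagnostic access so out-of-range reads are refused up front:
 *
 *	if (!hif_pci_addr_in_boundary(scn, offset))
 *		val = hif_read32_mb(scn, sc->mem + offset);
 */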
4016 
4017 /**
4018  * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
4019  * @scn: hif context
4020  *
4021  * Return: true if soc needs driver bmi otherwise false
4022  */
4023 bool hif_pci_needs_bmi(struct hif_softc *scn)
4024 {
4025 	return !ce_srng_based(scn);
4026 }
4027 
4028 #ifdef FORCE_WAKE
4029 #if defined(DEVICE_FORCE_WAKE_ENABLE) && !defined(CONFIG_PLD_PCIE_FW_SIM)
4030 
4031 /*
4032  * HIF_POLL_UMAC_WAKE poll value to indicate if UMAC is powered up
4033  * Update the below macro with FW defined one.
4034  */
4035 #define HIF_POLL_UMAC_WAKE 0x2
4036 
4037 static inline int hif_soc_wake_request(struct hif_opaque_softc *hif_handle)
4038 {
4039 	uint32_t timeout, value;
4040 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4041 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4042 
4043 	qdf_spin_lock_bh(&pci_scn->force_wake_lock);
4044 	if (qdf_atomic_inc_return(&scn->active_wake_req_cnt) > 1) {
4045 		qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4046 		return 0;
4047 	}
4048 
4049 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 1);
4050 	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
4051 	/*
4052 	 * do not reset the timeout
4053 	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
4054 	 */
4055 	timeout = 0;
4056 	do {
4057 		value = hif_read32_mb(
4058 				scn, scn->mem +
4059 				PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
4060 		if (value == HIF_POLL_UMAC_WAKE)
4061 			break;
4062 		qdf_mdelay(FORCE_WAKE_DELAY_MS);
4063 		timeout += FORCE_WAKE_DELAY_MS;
4064 	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
4065 
4066 	if (value != HIF_POLL_UMAC_WAKE) {
4067 		hif_err("force wake handshake failed, reg value = 0x%x",
4068 			value);
4069 		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
4070 		qdf_atomic_dec(&scn->active_wake_req_cnt);
4071 		qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4072 		return -ETIMEDOUT;
4073 	}
4074 
4075 	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
4076 	qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4077 	return 0;
4078 }
4079 
4080 static inline void hif_soc_wake_release(struct hif_opaque_softc *hif_handle)
4081 {
4082 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4083 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4084 
4085 	qdf_spin_lock_bh(&pci_scn->force_wake_lock);
4086 	if (!qdf_atomic_dec_and_test(&scn->active_wake_req_cnt)) {
4087 		qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4088 		return;
4089 	}
4090 
4091 	/* Release umac force wake */
4092 	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 0);
4093 	qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
4094 }
4095 
4096 /**
4097  * hif_force_wake_request(): Enable the force wake recipe
4098  * @hif_handle: HIF handle
4099  *
4100  * Bring MHI to M0 state and force wake the UMAC by asserting the
4101  * soc wake reg. Poll the scratch reg to check if its set to
4102  * HIF_POLL_UMAC_WAKE. The polled value may return 0x1 in case UMAC
4103  * is powered down.
4104  *
4105  * Return: 0 if the handshake succeeds, negative errno (e.g. -ETIMEDOUT) on failure
4106  */
4107 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
4108 {
4109 	uint32_t timeout;
4110 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4111 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4112 	int ret, status = 0;
4113 
4114 	/* Prevent runtime PM or trigger resume firstly */
4115 	if (hif_rtpm_get(HIF_RTPM_GET_SYNC, HIF_RTPM_ID_FORCE_WAKE)) {
4116 		hif_err("runtime pm get failed");
4117 		return -EINVAL;
4118 	}
4119 
4120 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
4121 	if (qdf_in_interrupt())
4122 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
4123 	else
4124 		timeout = 0;
4125 
4126 	ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
4127 	if (ret) {
4128 		hif_err("force wake request(timeout %u) send failed: %d",
4129 			timeout, ret);
4130 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
4131 		status = -EINVAL;
4132 		goto release_rtpm_ref;
4133 	}
4134 
4135 	/* If device's M1 state-change event races here, it can be ignored,
4136 	 * as the device is expected to immediately move from M2 to M0
4137 	 * without entering low power state.
4138 	 */
4139 	if (!pld_is_device_awake(scn->qdf_dev->dev))
4140 		hif_info("state-change event races, ignore");
4141 
4142 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
4143 
4144 	ret = hif_soc_wake_request(hif_handle);
4145 	if (ret) {
4146 		hif_err("soc force wake failed: %d", ret);
4147 		status = ret;
4148 		goto release_mhi_wake;
4149 	}
4150 	return 0;
4151 
4152 release_mhi_wake:
4153 	/* Release MHI force wake */
4154 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4155 	if (ret) {
4156 		hif_err("pld force wake release failure");
4157 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4158 		status = ret;
4159 	} else {
4160 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4161 	}
4162 
4163 release_rtpm_ref:
4164 	/* Release runtime PM force wake */
4165 	ret = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
4166 	if (ret) {
4167 		hif_err("runtime pm put failure: %d", ret);
4168 		return ret;
4169 	}
4170 
4171 	return status;
4172 }
4173 
4174 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
4175 {
4176 	int ret, status;
4177 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4178 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4179 
4180 	hif_soc_wake_release(hif_handle);
4181 
4182 	/* Release MHI force wake */
4183 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4184 	if (ret) {
4185 		hif_err("pld force wake release failure");
4186 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4187 		goto release_rtpm_ref;
4188 	}
4189 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4190 	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
4191 
4192 release_rtpm_ref:
4193 	/* Release runtime PM force wake */
4194 	status = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
4195 	if (status) {
4196 		hif_err("runtime pm put failure: %d", status);
4197 		return status;
4198 	}
4199 	return ret;
4200 }
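
/*
 * Illustrative bracketing (editor's sketch): callers pair request and
 * release around register access that needs the UMAC powered up:
 *
 *	if (!hif_force_wake_request(hif_hdl)) {
 *		val = hif_read32_mb(scn, scn->mem + offset);
 *		hif_force_wake_release(hif_hdl);
 *	}
 */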
4201 
4202 #else /* DEVICE_FORCE_WAKE_ENABLE */
4203 /** hif_force_wake_request() - MHI force wake without the PCIE
4204  * scratch register write/read handshake
4205  *
4206  * Return: 0 on success, -EINVAL on failure
4207  */
4208 int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
4209 {
4210 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4211 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4212 	uint32_t timeout;
4213 	int ret;
4214 
4215 	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
4216 
4217 	if (qdf_in_interrupt())
4218 		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
4219 	else
4220 		timeout = 0;
4221 
4222 	ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
4223 	if (ret) {
4224 		hif_err("force wake request(timeout %u) send failed: %d",
4225 			timeout, ret);
4226 		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
4227 		return -EINVAL;
4228 	}
4229 
4230 	/* If device's M1 state-change event races here, it can be ignored,
4231 	 * as the device is expected to immediately move from M2 to M0
4232 	 * without entering low power state.
4233 	 */
4234 	if (!pld_is_device_awake(scn->qdf_dev->dev))
4235 		hif_info("state-change event races, ignore");
4236 
4237 	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
4238 
4239 	return 0;
4240 }
4241 
4242 int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
4243 {
4244 	int ret;
4245 	struct hif_softc *scn = (struct hif_softc *)hif_handle;
4246 	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
4247 
4248 	ret = pld_force_wake_release(scn->qdf_dev->dev);
4249 	if (ret) {
4250 		hif_err("force wake release failure");
4251 		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
4252 		return ret;
4253 	}
4254 
4255 	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
4256 	return 0;
4257 }
4258 #endif /* DEVICE_FORCE_WAKE_ENABLE */
4259 
4260 void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
4261 {
4262 	hif_debug("mhi_force_wake_request_vote: %d",
4263 		  pci_handle->stats.mhi_force_wake_request_vote);
4264 	hif_debug("mhi_force_wake_failure: %d",
4265 		  pci_handle->stats.mhi_force_wake_failure);
4266 	hif_debug("mhi_force_wake_success: %d",
4267 		  pci_handle->stats.mhi_force_wake_success);
4268 	hif_debug("soc_force_wake_register_write_success: %d",
4269 		  pci_handle->stats.soc_force_wake_register_write_success);
4270 	hif_debug("soc_force_wake_failure: %d",
4271 		  pci_handle->stats.soc_force_wake_failure);
4272 	hif_debug("soc_force_wake_success: %d",
4273 		  pci_handle->stats.soc_force_wake_success);
4274 	hif_debug("mhi_force_wake_release_failure: %d",
4275 		  pci_handle->stats.mhi_force_wake_release_failure);
4276 	hif_debug("mhi_force_wake_release_success: %d",
4277 		  pci_handle->stats.mhi_force_wake_release_success);
4278 	hif_debug("oc_force_wake_release_success: %d",
4279 		  pci_handle->stats.soc_force_wake_release_success);
4280 }
4281 #endif /* FORCE_WAKE */
4282 
4283 #ifdef FEATURE_HAL_DELAYED_REG_WRITE
4284 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
4285 {
4286 	return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
4287 }
4288 
4289 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
4290 {
4291 	pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
4292 }
4293 #endif
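
/*
 * Illustrative pairing (editor's sketch): delayed register writes keep
 * the link out of L1 only while the queued writes are flushed:
 *
 *	if (!hif_prevent_link_low_power_states(hif)) {
 *		... flush pending register writes ...
 *		hif_allow_link_low_power_states(hif);
 *	}
 */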
4294 
4295 #ifdef IPA_OPT_WIFI_DP
4296 int hif_prevent_l1(struct hif_opaque_softc *hif)
4297 {
4298 	struct hif_softc *hif_softc = (struct hif_softc *)hif;
4299 	int status;
4300 
4301 	status = hif_force_wake_request(hif);
4302 	if (status) {
4303 		hif_err("Force wake request error");
4304 		return status;
4305 	}
4306 
4307 	qdf_atomic_inc(&hif_softc->opt_wifi_dp_rtpm_cnt);
4308 	hif_info("opt_dp: pcie link up count %d",
4309 		 qdf_atomic_read(&hif_softc->opt_wifi_dp_rtpm_cnt));
4310 	return status;
4311 }
4312 
4313 void hif_allow_l1(struct hif_opaque_softc *hif)
4314 {
4315 	struct hif_softc *hif_softc = (struct hif_softc *)hif;
4316 	int status;
4317 
4318 	if (qdf_atomic_read(&hif_softc->opt_wifi_dp_rtpm_cnt) > 0) {
4319 		status = hif_force_wake_release(hif);
4320 		if (status) {
4321 			hif_err("Force wake release error");
4322 			return;
4323 		}
4324 
4325 		qdf_atomic_dec(&hif_softc->opt_wifi_dp_rtpm_cnt);
4326 		hif_info("opt_dp: pcie link down count %d",
4327 			 qdf_atomic_read(&hif_softc->opt_wifi_dp_rtpm_cnt));
4328 	}
4329 }
4330 #endif
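
/*
 * Editor's note: hif_prevent_l1()/hif_allow_l1() above add a counted
 * vote (opt_wifi_dp_rtpm_cnt) on top of the force-wake pair, so nested
 * opt-DP users keep the link up until the last release (sketch):
 *
 *	if (!hif_prevent_l1(hif)) {
 *		... access registers with the link awake ...
 *		hif_allow_l1(hif);
 *	}
 */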
4331