xref: /wlan-driver/qca-wifi-host-cmn/hif/src/hif_main.h (revision 5113495b16420b49004c444715d2daae2066e7dc)
1*5113495bSYour Name /*
2*5113495bSYour Name  * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3*5113495bSYour Name  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4*5113495bSYour Name  *
5*5113495bSYour Name  * Permission to use, copy, modify, and/or distribute this software for
6*5113495bSYour Name  * any purpose with or without fee is hereby granted, provided that the
7*5113495bSYour Name  * above copyright notice and this permission notice appear in all
8*5113495bSYour Name  * copies.
9*5113495bSYour Name  *
10*5113495bSYour Name  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11*5113495bSYour Name  * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12*5113495bSYour Name  * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13*5113495bSYour Name  * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14*5113495bSYour Name  * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15*5113495bSYour Name  * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16*5113495bSYour Name  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17*5113495bSYour Name  * PERFORMANCE OF THIS SOFTWARE.
18*5113495bSYour Name  */
19*5113495bSYour Name 
20*5113495bSYour Name /*
21*5113495bSYour Name  * NB: Inappropriate references to "HTC" are used in this (and other)
22*5113495bSYour Name  * HIF implementations.  HTC is typically the calling layer, but it
23*5113495bSYour Name  * theoretically could be some alternative.
24*5113495bSYour Name  */
25*5113495bSYour Name 
26*5113495bSYour Name /*
27*5113495bSYour Name  * This holds all state needed to process a pending send/recv interrupt.
28*5113495bSYour Name  * The information is saved here as soon as the interrupt occurs (thus
29*5113495bSYour Name  * allowing the underlying CE to re-use the ring descriptor). The
30*5113495bSYour Name  * information here is eventually processed by a completion processing
31*5113495bSYour Name  * thread.
32*5113495bSYour Name  */
33*5113495bSYour Name 
34*5113495bSYour Name #ifndef __HIF_MAIN_H__
35*5113495bSYour Name #define __HIF_MAIN_H__
36*5113495bSYour Name 
37*5113495bSYour Name #include <qdf_atomic.h>         /* qdf_atomic_read */
38*5113495bSYour Name #include "qdf_lock.h"
39*5113495bSYour Name #include "cepci.h"
40*5113495bSYour Name #include "hif.h"
41*5113495bSYour Name #include "multibus.h"
42*5113495bSYour Name #include "hif_unit_test_suspend_i.h"
43*5113495bSYour Name #ifdef HIF_CE_LOG_INFO
44*5113495bSYour Name #include "qdf_notifier.h"
45*5113495bSYour Name #endif
46*5113495bSYour Name #include "pld_common.h"
47*5113495bSYour Name 
/* Sleep/inactivity timing parameters; values are in milliseconds (_MS) */
#define HIF_MIN_SLEEP_INACTIVITY_TIME_MS     50
#define HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS 60

/* Upper bound on a single processing budget */
#define HIF_MAX_BUDGET 0xFFFF
52*5113495bSYour Name 
/*
 * HIF_STATS_INC() - increment field @_field of @_handle->stats by @_delta.
 * Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside un-braced if/else bodies; @_delta is parenthesized to
 * preserve operator precedence for expression arguments.
 */
#define HIF_STATS_INC(_handle, _field, _delta) \
	do { \
		(_handle)->stats._field += (_delta); \
	} while (0)
57*5113495bSYour Name 
/*
 * This macro implementation is exposed for efficiency only.
 * The implementation may change and callers should
 * consider the targid to be a completely opaque handle.
 */
#define TARGID_TO_PCI_ADDR(targid) (*((A_target_id_t *)(targid)))

#ifdef QCA_WIFI_3_0
#define DISABLE_L1SS_STATES 1
#endif

/* Cap on buffers processed in one receive-processing pass */
#define MAX_NUM_OF_RECEIVES HIF_NAPI_MAX_RECEIVES

/* 1 when building for a QCA_WIFI_3_0_ADRASTEA target, 0 otherwise */
#ifdef QCA_WIFI_3_0_ADRASTEA
#define ADRASTEA_BU 1
#else
#define ADRASTEA_BU 0
#endif

/* FW indicator only exists on pre-QCA_WIFI_3_0 targets */
#ifdef QCA_WIFI_3_0
#define HAS_FW_INDICATOR 0
#else
#define HAS_FW_INDICATOR 1
#endif
82*5113495bSYour Name 
83*5113495bSYour Name 
/* Device IDs and firmware version codes of the supported targets */
#define AR9888_DEVICE_ID (0x003c)
#define AR6320_DEVICE_ID (0x003e)
#define AR6320_FW_1_1  (0x11)
#define AR6320_FW_1_3  (0x13)
#define AR6320_FW_2_0  (0x20)
#define AR6320_FW_3_0  (0x30)
#define AR6320_FW_3_2  (0x32)
#define QCA6290_EMULATION_DEVICE_ID (0xabcd)
#define QCA6290_DEVICE_ID (0x1100)
#define QCN9000_DEVICE_ID (0x1104)
#define QCN9224_DEVICE_ID (0x1109)
#define QCN6122_DEVICE_ID (0xFFFB)
#define QCN9160_DEVICE_ID (0xFFF8)
#define QCN6432_DEVICE_ID (0xFFF7)
#define QCA6390_EMULATION_DEVICE_ID (0x0108)
#define QCA6390_DEVICE_ID (0x1101)
/* TODO: change IDs for HastingsPrime */
#define QCA6490_EMULATION_DEVICE_ID (0x010a)
#define QCA6490_DEVICE_ID (0x1103)
#define MANGO_DEVICE_ID (0x110a)
#define PEACH_DEVICE_ID (0x110e)

/* TODO: change IDs for Moselle */
#define QCA6750_EMULATION_DEVICE_ID (0x010c)
#define QCA6750_DEVICE_ID (0x1105)

/* TODO: change IDs for Hamilton */
#define KIWI_DEVICE_ID (0x1107)

/* TODO: change IDs for Evros */
#define WCN6450_DEVICE_ID (0x1108)

#define ADRASTEA_DEVICE_ID_P2_E12 (0x7021)
#define AR9887_DEVICE_ID    (0x0050)
#define AR900B_DEVICE_ID    (0x0040)
#define QCA9984_DEVICE_ID   (0x0046)
#define QCA9888_DEVICE_ID   (0x0056)
#define QCA8074_DEVICE_ID   (0xffff) /* Todo: replace this with
					actual number once available.
					currently defining this to 0xffff for
					emulation purpose */
#define QCA8074V2_DEVICE_ID (0xfffe) /* Todo: replace this with actual number */
#define QCA6018_DEVICE_ID (0xfffd) /* Todo: replace this with actual number */
#define QCA5018_DEVICE_ID (0xfffc) /* Todo: replace this with actual number */
#define QCA9574_DEVICE_ID (0xfffa)
#define QCA5332_DEVICE_ID (0xfff9)
/* Genoa */
#define QCN7605_DEVICE_ID  (0x1102) /* Genoa PCIe device ID */
#define QCN7605_COMPOSITE  (0x9901)
#define QCN7605_STANDALONE  (0x9900)
#define QCN7605_STANDALONE_V2  (0x9902)
#define QCN7605_COMPOSITE_V2  (0x9903)

/* Emulation (RUMI) platform node IDs */
#define RUMIM2M_DEVICE_ID_NODE0	0xabc0
#define RUMIM2M_DEVICE_ID_NODE1	0xabc1
#define RUMIM2M_DEVICE_ID_NODE2	0xabc2
#define RUMIM2M_DEVICE_ID_NODE3	0xabc3
#define RUMIM2M_DEVICE_ID_NODE4	0xaa10
#define RUMIM2M_DEVICE_ID_NODE5	0xaa11
143*5113495bSYour Name 
/*
 * Cast helpers converting an opaque HIF handle to the bus/layer specific
 * context types. NOTE(review): these plain casts assume each target
 * structure begins with the common HIF state — verify against the
 * respective softc definitions.
 */
#define HIF_GET_PCI_SOFTC(scn) ((struct hif_pci_softc *)scn)
#define HIF_GET_IPCI_SOFTC(scn) ((struct hif_ipci_softc *)scn)
#define HIF_GET_CE_STATE(scn) ((struct HIF_CE_state *)scn)
#define HIF_GET_SDIO_SOFTC(scn) ((struct hif_sdio_softc *)scn)
#define HIF_GET_USB_SOFTC(scn) ((struct hif_usb_softc *)scn)
#define HIF_GET_USB_DEVICE(scn) ((struct HIF_DEVICE_USB *)scn)
#define HIF_GET_SOFTC(scn) ((struct hif_softc *)scn)
#define GET_HIF_OPAQUE_HDL(scn) ((struct hif_opaque_softc *)scn)
152*5113495bSYour Name 
/* Number of copy engines available on the target */
#ifdef QCA_WIFI_QCN9224
#define NUM_CE_AVAILABLE 16
#else
#define NUM_CE_AVAILABLE 12
#endif
/* Add 1 here to store default configuration in index 0 */
#define NUM_CE_CONTEXT (NUM_CE_AVAILABLE + 1)
160*5113495bSYour Name 
/* Map a CE id to its interrupt index (identity mapping here); the argument
 * is parenthesized so expression arguments expand with correct precedence.
 */
#define CE_INTERRUPT_IDX(x) (x)
162*5113495bSYour Name 
#ifdef WLAN_64BIT_DATA_SUPPORT
/* Size in bytes of the per-CE Read Ring Index (RRI) array kept in DDR;
 * fully parenthesized so the macro composes safely inside expressions
 * (the original unparenthesized form misbinds under '/' or casts).
 */
#define RRI_ON_DDR_MEM_SIZE ((CE_COUNT) * sizeof(uint64_t))
#else
#define RRI_ON_DDR_MEM_SIZE ((CE_COUNT) * sizeof(uint32_t))
#endif
168*5113495bSYour Name 
/**
 * struct ce_int_assignment - MSI interrupt index assignment per copy engine
 * @msi_idx: MSI index assigned to each available copy engine
 */
struct ce_int_assignment {
	uint8_t msi_idx[NUM_CE_AVAILABLE];
};
172*5113495bSYour Name 
/**
 * struct hif_ce_stats - HIF copy-engine related counters
 * @hif_pipe_no_resrc_count: no-resource events seen on HIF pipes
 * @ce_ring_delta_fail_count: CE ring delta failures
 */
struct hif_ce_stats {
	int hif_pipe_no_resrc_count;
	int ce_ring_delta_fail_count;
};
177*5113495bSYour Name 
178*5113495bSYour Name #ifdef HIF_DETECTION_LATENCY_ENABLE
/**
 * struct hif_tasklet_running_info - running info of a monitored tasklet
 * @sched_cpuid: id of cpu on which the tasklet was scheduled
 * @sched_time: time when the tasklet was scheduled
 * @exec_time: time when the tasklet was executed
 */
struct hif_tasklet_running_info {
	int sched_cpuid;
	qdf_time_t sched_time;
	qdf_time_t exec_time;
};

/* Number of tasklet slots monitored for latency: one per copy engine */
#define HIF_TASKLET_IN_MONITOR CE_COUNT_MAX
192*5113495bSYour Name 
/**
 * struct hif_latency_detect - state for HIF tasklet/credit latency detection
 * @timer: timer driving the periodic detection
 * @timeout: timeout value programmed for @timer
 * @is_timer_started: whether @timer is currently running
 * @enable_detection: master switch for latency detection
 * @threshold: threshold when stall happens
 * @tasklet_bmap: per-tasklet enable/validity bitmap, see inline comment
 * @tasklet_info: per-CE tasklet running info, see inline comment
 * @credit_request_time: time of the credit request
 * @credit_report_time: time of the credit report
 */
struct hif_latency_detect {
	qdf_timer_t timer;
	uint32_t timeout;
	bool is_timer_started;
	bool enable_detection;
	/* threshold when stall happens */
	uint32_t threshold;

	/*
	 * Bitmap to indicate the enablement of latency detection for
	 * the tasklets. bit-X represents for tasklet of WLAN_CE_X,
	 * latency detection is enabled on the corresponding tasklet
	 * when a bit is set.
	 * At the same time, this bitmap also indicates the validity of
	 * elements in array 'tasklet_info', bit-X represents for index-X,
	 * the corresponding element is valid when a bit is set.
	 */
	qdf_bitmap(tasklet_bmap, HIF_TASKLET_IN_MONITOR);

	/*
	 * Array to record running info of tasklets, info of tasklet
	 * for WLAN_CE_X is stored at index-X.
	 */
	struct hif_tasklet_running_info tasklet_info[HIF_TASKLET_IN_MONITOR];
	qdf_time_t credit_request_time;
	qdf_time_t credit_report_time;
};
220*5113495bSYour Name #endif
221*5113495bSYour Name 
222*5113495bSYour Name /*
223*5113495bSYour Name  * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
224*5113495bSYour Name  * for defined here
225*5113495bSYour Name  */
226*5113495bSYour Name #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
227*5113495bSYour Name 
/* Dimensions of the latest-events history table (see struct ce_desc_hist) */
#define HIF_CE_MAX_LATEST_HIST 2
#define HIF_CE_MAX_LATEST_EVTS 2

/**
 * struct latest_evt_history - snapshot of a recently recorded CE event
 * @irq_entry_ts: timestamp at IRQ entry
 * @bh_entry_ts: timestamp at bottom-half entry
 * @bh_resched_ts: timestamp when the bottom half was rescheduled
 * @bh_exit_ts: timestamp at bottom-half exit
 * @bh_work_ts: timestamp of bottom-half work
 * @cpu_id: CPU on which the event was recorded
 * @ring_hp: ring head pointer recorded with the event
 * @ring_tp: ring tail pointer recorded with the event
 */
struct latest_evt_history {
	uint64_t irq_entry_ts;
	uint64_t bh_entry_ts;
	uint64_t bh_resched_ts;
	uint64_t bh_exit_ts;
	uint64_t bh_work_ts;
	int cpu_id;
	uint32_t ring_hp;
	uint32_t ring_tp;
};
241*5113495bSYour Name 
/**
 * struct ce_desc_hist - CE descriptor history bookkeeping
 * @history_index: per-CE index of the current history slot
 * @ce_id_hist_map: per-CE mapping to a history id
 * @enable: per-CE enable flag for descriptor history
 * @data_enable: per-CE enable flag for data history
 * @ce_dbg_datamem_lock: per-CE lock for the history data memory
 * @hist_index: history index
 * @hist_id: history id
 * @hist_ev: per-CE pointer to event history storage
 * @latest_evts: latest recorded events, per history slot and event slot
 */
struct ce_desc_hist {
	qdf_atomic_t history_index[CE_COUNT_MAX];
	uint8_t ce_id_hist_map[CE_COUNT_MAX];
	bool enable[CE_COUNT_MAX];
	bool data_enable[CE_COUNT_MAX];
	qdf_mutex_t ce_dbg_datamem_lock[CE_COUNT_MAX];
	uint32_t hist_index;
	uint32_t hist_id;
	void *hist_ev[CE_COUNT_MAX];
	struct latest_evt_history latest_evts[HIF_CE_MAX_LATEST_HIST][HIF_CE_MAX_LATEST_EVTS];
};
253*5113495bSYour Name 
/**
 * hif_record_latest_evt() - record an event into the latest-events history
 * @ce_hist: CE descriptor history context
 * @type: event type
 * @ce_id: copy engine id the event belongs to
 * @time: event timestamp
 * @hp: ring head pointer at the time of the event
 * @tp: ring tail pointer at the time of the event
 */
void hif_record_latest_evt(struct ce_desc_hist *ce_hist,
			   uint8_t type,
			   int ce_id, uint64_t time,
			   uint32_t hp, uint32_t tp);
258*5113495bSYour Name #endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)*/
259*5113495bSYour Name 
/**
 * struct hif_cfg - store ini config parameters in hif layer
 * @ce_status_ring_timer_threshold: ce status ring timer threshold
 * @ce_status_ring_batch_count_threshold: ce status ring batch count threshold
 * @disable_wake_irq: disable wake irq
 */
struct hif_cfg {
	uint16_t ce_status_ring_timer_threshold;
	uint8_t ce_status_ring_batch_count_threshold;
	bool disable_wake_irq;
};
271*5113495bSYour Name 
#ifdef DP_UMAC_HW_RESET_SUPPORT
/**
 * struct hif_umac_reset_ctx - UMAC HW reset context at HIF layer
 * @intr_tq: Tasklet structure
 * @irq_handler: IRQ handler
 * @cb_handler: Callback handler
 * @cb_ctx: Argument passed to both @irq_handler and @cb_handler
 * @os_irq: Interrupt number for this IRQ
 * @irq_configured: Whether the IRQ has been configured
 */
struct hif_umac_reset_ctx {
	struct tasklet_struct intr_tq;
	bool (*irq_handler)(void *cb_ctx);
	int (*cb_handler)(void *cb_ctx);
	void *cb_ctx;
	uint32_t os_irq;
	bool irq_configured;
};
#endif
291*5113495bSYour Name 
/* Maximum number of shadow register configurations (see shadow_regs[]) */
#define MAX_SHADOW_REGS 40
293*5113495bSYour Name 
294*5113495bSYour Name #ifdef FEATURE_HIF_DELAYED_REG_WRITE
/**
 * enum hif_reg_sched_delay - ENUM for write sched delay histogram
 * @HIF_REG_WRITE_SCHED_DELAY_SUB_100us: index for delay < 100us
 * @HIF_REG_WRITE_SCHED_DELAY_SUB_1000us: index for delay < 1000us
 * @HIF_REG_WRITE_SCHED_DELAY_SUB_5000us: index for delay < 5000us
 * @HIF_REG_WRITE_SCHED_DELAY_GT_5000us: index for delay >= 5000us
 * @HIF_REG_WRITE_SCHED_DELAY_HIST_MAX: Max value (size of histogram array)
 */
enum hif_reg_sched_delay {
	HIF_REG_WRITE_SCHED_DELAY_SUB_100us,
	HIF_REG_WRITE_SCHED_DELAY_SUB_1000us,
	HIF_REG_WRITE_SCHED_DELAY_SUB_5000us,
	HIF_REG_WRITE_SCHED_DELAY_GT_5000us,
	HIF_REG_WRITE_SCHED_DELAY_HIST_MAX,
};
310*5113495bSYour Name 
/**
 * struct hif_reg_write_soc_stats - soc stats to keep track of register writes
 * @enqueues: writes enqueued to delayed work
 * @dequeues: writes dequeued from delayed work (not written yet)
 * @coalesces: writes not enqueued since srng is already queued up
 * @direct: writes not enqueued and written to register directly
 * @prevent_l1_fails: prevent l1 API failed
 * @q_depth: current queue depth in delayed register write queue
 * @max_q_depth: maximum queue depth for delayed register write queue
 * @sched_delay: histogram of kernel work sched delay + bus wakeup delay
 * @dequeue_delay: number of times the dequeue operation was delayed
 */
struct hif_reg_write_soc_stats {
	qdf_atomic_t enqueues;
	uint32_t dequeues;
	qdf_atomic_t coalesces;
	qdf_atomic_t direct;
	uint32_t prevent_l1_fails;
	qdf_atomic_t q_depth;
	uint32_t max_q_depth;
	uint32_t sched_delay[HIF_REG_WRITE_SCHED_DELAY_HIST_MAX];
	uint32_t dequeue_delay;
};
334*5113495bSYour Name 
/**
 * struct hif_reg_write_q_elem - delayed register write queue element
 * @ce_state: CE state queued for a delayed write
 * @offset: offset of the CE register
 * @enqueue_val: register value at the time of delayed write enqueue
 * @dequeue_val: register value at the time of delayed write dequeue
 * @valid: whether this entry is valid (non-zero) or not
 * @enqueue_time: enqueue time (qdf_log_timestamp)
 * @work_scheduled_time: work scheduled time (qdf_log_timestamp)
 * @dequeue_time: dequeue time (qdf_log_timestamp)
 * @cpu_id: cpu id recorded when the work was scheduled
 */
struct hif_reg_write_q_elem {
	struct CE_state *ce_state;
	uint32_t offset;
	uint32_t enqueue_val;
	uint32_t dequeue_val;
	uint8_t valid;
	qdf_time_t enqueue_time;
	qdf_time_t work_scheduled_time;
	qdf_time_t dequeue_time;
	int cpu_id;
};
358*5113495bSYour Name #endif
359*5113495bSYour Name 
/*
 * struct hif_softc - core per-device HIF state, common to all bus types.
 * An opaque handle (struct hif_opaque_softc *) is converted to this type
 * via HIF_GET_SOFTC(); feature-specific members are conditionally compiled.
 */
struct hif_softc {
	struct hif_opaque_softc osc;
	struct hif_config_info hif_config;
	struct hif_target_info target_info;
	void __iomem *mem;
	void __iomem *mem_ce;
	void __iomem *mem_cmem;
	void __iomem *mem_pmm_base;
	enum qdf_bus_type bus_type;
	struct hif_bus_ops bus_ops;
	void *ce_id_to_state[CE_COUNT_MAX];
	qdf_device_t qdf_dev;
	bool hif_init_done;
	bool request_irq_done;
	bool ext_grp_irq_configured;
	bool free_irq_done;
	uint8_t ce_latency_stats;
	/* Packet statistics */
	struct hif_ce_stats pkt_stats;
	enum hif_target_status target_status;
	uint64_t event_enable_mask;

	struct targetdef_s *targetdef;
	struct ce_reg_def *target_ce_def;
	struct hostdef_s *hostdef;
	struct host_shadow_regs_s *host_shadow_regs;

	bool recovery;
	bool notice_send;
	bool per_ce_irq;
	uint32_t ce_irq_summary;
	/* No of copy engines supported */
	unsigned int ce_count;
	struct ce_int_assignment *int_assignment;
	atomic_t active_tasklet_cnt;
	atomic_t active_grp_tasklet_cnt;
	atomic_t active_oom_work_cnt;
	atomic_t link_suspended;
	void *vaddr_rri_on_ddr;
	atomic_t active_wake_req_cnt;
	qdf_dma_addr_t paddr_rri_on_ddr;
#ifdef CONFIG_BYPASS_QMI
	uint32_t *vaddr_qmi_bypass;
	qdf_dma_addr_t paddr_qmi_bypass;
#endif
	int linkstate_vote;
	bool fastpath_mode_on;
	atomic_t tasklet_from_intr;
	int htc_htt_tx_endpoint;
	qdf_dma_addr_t mem_pa;
	bool athdiag_procfs_inited;
#ifdef FEATURE_NAPI
	struct qca_napi_data napi_data;
#endif /* FEATURE_NAPI */
	/* stores ce_service_max_yield_time in ns */
	unsigned long long ce_service_max_yield_time;
	uint8_t ce_service_max_rx_ind_flush;
	struct hif_driver_state_callbacks callbacks;
	uint32_t hif_con_param;
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
	uint32_t nss_wifi_ol_mode;
#endif
	void *hal_soc;
	struct hif_ut_suspend_context ut_suspend_ctx;
	uint32_t hif_attribute;
	int wake_irq;
	hif_pm_wake_irq_type wake_irq_type;
	void (*initial_wakeup_cb)(void *);
	void *initial_wakeup_priv;
#ifdef REMOVE_PKT_LOG
	/* Handle to pktlog device */
	void *pktlog_dev;
#endif
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
	/* Pointer to the srng event history */
	struct hif_event_history *evt_hist[HIF_NUM_INT_CONTEXTS];
#endif

/*
 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
 * for defined here
 */
#if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
	struct ce_desc_hist hif_ce_desc_hist;
#endif /*defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)*/
#ifdef IPA_OFFLOAD
	qdf_shared_mem_t *ipa_ce_ring;
#endif
#ifdef IPA_OPT_WIFI_DP
	qdf_atomic_t opt_wifi_dp_rtpm_cnt;
#endif
	struct hif_cfg ini_cfg;
#ifdef HIF_CE_LOG_INFO
	qdf_notif_block hif_recovery_notifier;
#endif
#if defined(HIF_CPU_PERF_AFFINE_MASK) || \
	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)

	/* The CPU hotplug event registration handle */
	struct qdf_cpuhp_handler *cpuhp_event_handle;
#endif
	/* Should the unlazy support for interrupt delivery be disabled */
	uint32_t irq_unlazy_disable;
	/* Flag to indicate whether bus is suspended */
	bool bus_suspended;
	bool pktlog_init;
#ifdef FEATURE_RUNTIME_PM
	/* Variable to track the link state change in RTPM */
	qdf_atomic_t pm_link_state;
#endif
#ifdef HIF_DETECTION_LATENCY_ENABLE
	struct hif_latency_detect latency_detect;
#endif
#ifdef FEATURE_RUNTIME_PM
	qdf_runtime_lock_t prevent_linkdown_lock;
#endif
#ifdef SYSTEM_PM_CHECK
	qdf_atomic_t sys_pm_state;
#endif
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
	qdf_atomic_t dp_ep_vote_access;
	qdf_atomic_t ep_vote_access;
#endif
	/* CMEM address target reserved for host usage */
	uint64_t cmem_start;
	/* CMEM size target reserved */
	uint64_t cmem_size;
#ifdef DP_UMAC_HW_RESET_SUPPORT
	struct hif_umac_reset_ctx umac_reset_ctx;
#endif
#ifdef CONFIG_SHADOW_V3
	struct pld_shadow_reg_v3_cfg shadow_regs[MAX_SHADOW_REGS];
	int num_shadow_registers_configured;
#endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
	/* CPU Affinity info of IRQs */
	bool affinity_mgr_supported;
	uint64_t time_threshold;
	struct hif_cpu_affinity ce_irq_cpu_mask[CE_COUNT_MAX];
	struct hif_cpu_affinity irq_cpu_mask[HIF_MAX_GROUP][HIF_MAX_GRP_IRQ];
	qdf_cpu_mask allowed_mask;
#endif
#ifdef FEATURE_DIRECT_LINK
	struct qdf_mem_multi_page_t dl_recv_pages;
	int dl_recv_pipe_num;
#endif
#ifdef WLAN_FEATURE_CE_RX_BUFFER_REUSE
	struct wbuff_mod_handle *wbuff_handle;
#endif
#ifdef FEATURE_HIF_DELAYED_REG_WRITE
	/* queue(array) to hold register writes */
	struct hif_reg_write_q_elem *reg_write_queue;
	/* delayed work to be queued into workqueue */
	qdf_work_t reg_write_work;
	/* workqueue for delayed register writes */
	qdf_workqueue_t *reg_write_wq;
	/* write index used by caller to enqueue delayed work */
	qdf_atomic_t write_idx;
	/* read index used by worker thread to dequeue/write registers */
	uint32_t read_idx;
	struct hif_reg_write_soc_stats wstats;
	qdf_atomic_t active_work_cnt;
#endif /* FEATURE_HIF_DELAYED_REG_WRITE */
};
524*5113495bSYour Name 
525*5113495bSYour Name #if defined(NUM_SOC_PERF_CLUSTER) && (NUM_SOC_PERF_CLUSTER > 1)
hif_get_perf_cluster_bitmap(void)526*5113495bSYour Name static inline uint16_t hif_get_perf_cluster_bitmap(void)
527*5113495bSYour Name {
528*5113495bSYour Name 	return (BIT(CPU_CLUSTER_TYPE_PERF) | BIT(CPU_CLUSTER_TYPE_PERF2));
529*5113495bSYour Name }
530*5113495bSYour Name #else /* NUM_SOC_PERF_CLUSTER > 1 */
hif_get_perf_cluster_bitmap(void)531*5113495bSYour Name static inline uint16_t hif_get_perf_cluster_bitmap(void)
532*5113495bSYour Name {
533*5113495bSYour Name 	return BIT(CPU_CLUSTER_TYPE_PERF);
534*5113495bSYour Name }
535*5113495bSYour Name #endif /* NUM_SOC_PERF_CLUSTER > 1 */
536*5113495bSYour Name 
537*5113495bSYour Name static inline
hif_get_hal_handle(struct hif_opaque_softc * hif_hdl)538*5113495bSYour Name void *hif_get_hal_handle(struct hif_opaque_softc *hif_hdl)
539*5113495bSYour Name {
540*5113495bSYour Name 	struct hif_softc *sc = (struct hif_softc *)hif_hdl;
541*5113495bSYour Name 
542*5113495bSYour Name 	if (!sc)
543*5113495bSYour Name 		return NULL;
544*5113495bSYour Name 
545*5113495bSYour Name 	return sc->hal_soc;
546*5113495bSYour Name }
547*5113495bSYour Name 
548*5113495bSYour Name /**
549*5113495bSYour Name  * hif_get_cmem_info() - get CMEM address and size from HIF handle
550*5113495bSYour Name  * @hif_hdl: HIF handle pointer
551*5113495bSYour Name  * @cmem_start: pointer for CMEM address
552*5113495bSYour Name  * @cmem_size: pointer for CMEM size
553*5113495bSYour Name  *
554*5113495bSYour Name  * Return: None.
555*5113495bSYour Name  */
556*5113495bSYour Name static inline
hif_get_cmem_info(struct hif_opaque_softc * hif_hdl,uint64_t * cmem_start,uint64_t * cmem_size)557*5113495bSYour Name void hif_get_cmem_info(struct hif_opaque_softc *hif_hdl,
558*5113495bSYour Name 		       uint64_t *cmem_start,
559*5113495bSYour Name 		       uint64_t *cmem_size)
560*5113495bSYour Name {
561*5113495bSYour Name 	struct hif_softc *sc = (struct hif_softc *)hif_hdl;
562*5113495bSYour Name 
563*5113495bSYour Name 	*cmem_start = sc->cmem_start;
564*5113495bSYour Name 	*cmem_size = sc->cmem_size;
565*5113495bSYour Name }
566*5113495bSYour Name 
567*5113495bSYour Name /**
568*5113495bSYour Name  * hif_get_num_active_tasklets() - get the number of active
569*5113495bSYour Name  *		tasklets pending to be completed.
570*5113495bSYour Name  * @scn: HIF context
571*5113495bSYour Name  *
572*5113495bSYour Name  * Returns: the number of tasklets which are active
573*5113495bSYour Name  */
hif_get_num_active_tasklets(struct hif_softc * scn)574*5113495bSYour Name static inline int hif_get_num_active_tasklets(struct hif_softc *scn)
575*5113495bSYour Name {
576*5113495bSYour Name 	return qdf_atomic_read(&scn->active_tasklet_cnt);
577*5113495bSYour Name }
578*5113495bSYour Name 
579*5113495bSYour Name /**
580*5113495bSYour Name  * hif_get_num_active_oom_work() - get the number of active
581*5113495bSYour Name  *		oom work pending to be completed.
582*5113495bSYour Name  * @scn: HIF context
583*5113495bSYour Name  *
584*5113495bSYour Name  * Returns: the number of oom works which are active
585*5113495bSYour Name  */
hif_get_num_active_oom_work(struct hif_softc * scn)586*5113495bSYour Name static inline int hif_get_num_active_oom_work(struct hif_softc *scn)
587*5113495bSYour Name {
588*5113495bSYour Name 	return qdf_atomic_read(&scn->active_oom_work_cnt);
589*5113495bSYour Name }
590*5113495bSYour Name 
/*
 * Max waiting time during Runtime PM suspend to finish all
 * the tasks. This is in multiples of 10 ms.
 */
#ifdef PANIC_ON_BUG
#define HIF_TASK_DRAIN_WAIT_CNT 200
#else
#define HIF_TASK_DRAIN_WAIT_CNT 25
#endif
600*5113495bSYour Name 
601*5113495bSYour Name /**
602*5113495bSYour Name  * hif_try_complete_tasks() - Try to complete all the pending tasks
603*5113495bSYour Name  * @scn: HIF context
604*5113495bSYour Name  *
605*5113495bSYour Name  * Try to complete all the pending datapath tasks, i.e. tasklets,
606*5113495bSYour Name  * DP group tasklets and works which are queued, in a given time
607*5113495bSYour Name  * slot.
608*5113495bSYour Name  *
609*5113495bSYour Name  * Returns: QDF_STATUS_SUCCESS if all the tasks were completed
610*5113495bSYour Name  *	QDF error code, if the time slot exhausted
611*5113495bSYour Name  */
612*5113495bSYour Name QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn);
613*5113495bSYour Name 
/**
 * hif_is_nss_wifi_enabled() - check whether NSS WiFi offload mode is set
 * @sc: HIF context
 *
 * Return: true when nss_wifi_ol_mode is non-zero; always false when
 * QCA_NSS_WIFI_OFFLOAD_SUPPORT is not compiled in
 */
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc)
{
	return sc->nss_wifi_ol_mode != 0;
}
#else
static inline bool hif_is_nss_wifi_enabled(struct hif_softc *sc)
{
	return false;
}
#endif
625*5113495bSYour Name 
hif_is_attribute_set(struct hif_softc * sc,uint32_t hif_attrib)626*5113495bSYour Name static inline uint8_t hif_is_attribute_set(struct hif_softc *sc,
627*5113495bSYour Name 						uint32_t hif_attrib)
628*5113495bSYour Name {
629*5113495bSYour Name 	return sc->hif_attribute == hif_attrib;
630*5113495bSYour Name }
631*5113495bSYour Name 
#ifdef WLAN_FEATURE_DP_EVENT_HISTORY
/**
 * hif_set_event_hist_mask() - program the default DP event history mask
 * @hif_handle: opaque HIF handle
 *
 * Stores HIF_EVENT_HIST_ENABLE_MASK into event_enable_mask of the
 * HIF context; no-op when WLAN_FEATURE_DP_EVENT_HISTORY is disabled.
 *
 * Return: None
 */
static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *hif_sc = (struct hif_softc *)hif_handle;

	hif_sc->event_enable_mask = HIF_EVENT_HIST_ENABLE_MASK;
}
#else
static inline void hif_set_event_hist_mask(struct hif_opaque_softc *hif_handle)
{
}
#endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
644*5113495bSYour Name 
645*5113495bSYour Name A_target_id_t hif_get_target_id(struct hif_softc *scn);
646*5113495bSYour Name void hif_dump_pipe_debug_count(struct hif_softc *scn);
647*5113495bSYour Name void hif_display_bus_stats(struct hif_opaque_softc *scn);
648*5113495bSYour Name void hif_clear_bus_stats(struct hif_opaque_softc *scn);
649*5113495bSYour Name bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count);
650*5113495bSYour Name void hif_shutdown_device(struct hif_opaque_softc *hif_ctx);
651*5113495bSYour Name int hif_bus_configure(struct hif_softc *scn);
652*5113495bSYour Name void hif_cancel_deferred_target_sleep(struct hif_softc *scn);
653*5113495bSYour Name int hif_config_ce(struct hif_softc *scn);
654*5113495bSYour Name int hif_config_ce_pktlog(struct hif_opaque_softc *hif_ctx);
655*5113495bSYour Name int hif_config_ce_by_id(struct hif_softc *scn, int pipe_num);
656*5113495bSYour Name void hif_unconfig_ce(struct hif_softc *scn);
657*5113495bSYour Name void hif_ce_prepare_config(struct hif_softc *scn);
658*5113495bSYour Name QDF_STATUS hif_ce_open(struct hif_softc *scn);
659*5113495bSYour Name void hif_ce_close(struct hif_softc *scn);
660*5113495bSYour Name int athdiag_procfs_init(void *scn);
661*5113495bSYour Name void athdiag_procfs_remove(void);
662*5113495bSYour Name /* routine to modify the initial buffer count to be allocated on an os
663*5113495bSYour Name  * platform basis. Platform owner will need to modify this as needed
664*5113495bSYour Name  */
665*5113495bSYour Name qdf_size_t init_buffer_count(qdf_size_t maxSize);
666*5113495bSYour Name 
667*5113495bSYour Name irqreturn_t hif_fw_interrupt_handler(int irq, void *arg);
668*5113495bSYour Name int hif_get_device_type(uint32_t device_id,
669*5113495bSYour Name 			uint32_t revision_id,
670*5113495bSYour Name 			uint32_t *hif_type, uint32_t *target_type);
671*5113495bSYour Name /*These functions are exposed to HDD*/
672*5113495bSYour Name void hif_nointrs(struct hif_softc *scn);
673*5113495bSYour Name void hif_bus_close(struct hif_softc *ol_sc);
674*5113495bSYour Name QDF_STATUS hif_bus_open(struct hif_softc *ol_sc,
675*5113495bSYour Name 	enum qdf_bus_type bus_type);
676*5113495bSYour Name QDF_STATUS hif_enable_bus(struct hif_softc *ol_sc, struct device *dev,
677*5113495bSYour Name 	void *bdev, const struct hif_bus_id *bid, enum hif_enable_type type);
678*5113495bSYour Name void hif_disable_bus(struct hif_softc *scn);
679*5113495bSYour Name void hif_bus_prevent_linkdown(struct hif_softc *scn, bool flag);
680*5113495bSYour Name int hif_bus_get_context_size(enum qdf_bus_type bus_type);
681*5113495bSYour Name void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *bar_value);
682*5113495bSYour Name uint32_t hif_get_conparam(struct hif_softc *scn);
683*5113495bSYour Name struct hif_driver_state_callbacks *hif_get_callbacks_handle(
684*5113495bSYour Name 							struct hif_softc *scn);
685*5113495bSYour Name bool hif_is_driver_unloading(struct hif_softc *scn);
686*5113495bSYour Name bool hif_is_load_or_unload_in_progress(struct hif_softc *scn);
687*5113495bSYour Name bool hif_is_recovery_in_progress(struct hif_softc *scn);
688*5113495bSYour Name bool hif_is_target_ready(struct hif_softc *scn);
689*5113495bSYour Name 
690*5113495bSYour Name /**
691*5113495bSYour Name  * hif_get_bandwidth_level() - API to get the current bandwidth level
692*5113495bSYour Name  * @hif_handle: HIF Context
693*5113495bSYour Name  *
694*5113495bSYour Name  * Return: PLD bandwidth level
695*5113495bSYour Name  */
696*5113495bSYour Name int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle);
697*5113495bSYour Name 
698*5113495bSYour Name void hif_wlan_disable(struct hif_softc *scn);
699*5113495bSYour Name int hif_target_sleep_state_adjust(struct hif_softc *scn,
700*5113495bSYour Name 					 bool sleep_ok,
701*5113495bSYour Name 					 bool wait_for_it);
702*5113495bSYour Name 
703*5113495bSYour Name #ifdef DP_MEM_PRE_ALLOC
704*5113495bSYour Name void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
705*5113495bSYour Name 					 qdf_size_t size,
706*5113495bSYour Name 					 qdf_dma_addr_t *paddr,
707*5113495bSYour Name 					 uint32_t ring_type,
708*5113495bSYour Name 					 uint8_t *is_mem_prealloc);
709*5113495bSYour Name 
710*5113495bSYour Name void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
711*5113495bSYour Name 				       qdf_size_t size,
712*5113495bSYour Name 				       void *vaddr,
713*5113495bSYour Name 				       qdf_dma_addr_t paddr,
714*5113495bSYour Name 				       qdf_dma_context_t memctx,
715*5113495bSYour Name 				       uint8_t is_mem_prealloc);
716*5113495bSYour Name 
717*5113495bSYour Name /**
718*5113495bSYour Name  * hif_prealloc_get_multi_pages() - gets pre-alloc DP multi-pages memory
719*5113495bSYour Name  * @scn: HIF context
720*5113495bSYour Name  * @desc_type: descriptor type
721*5113495bSYour Name  * @elem_size: single element size
722*5113495bSYour Name  * @elem_num: total number of elements should be allocated
723*5113495bSYour Name  * @pages: multi page information storage
724*5113495bSYour Name  * @cacheable: coherent memory or cacheable memory
725*5113495bSYour Name  *
726*5113495bSYour Name  * Return: None
727*5113495bSYour Name  */
728*5113495bSYour Name void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
729*5113495bSYour Name 				  qdf_size_t elem_size, uint16_t elem_num,
730*5113495bSYour Name 				  struct qdf_mem_multi_page_t *pages,
731*5113495bSYour Name 				  bool cacheable);
732*5113495bSYour Name 
733*5113495bSYour Name /**
734*5113495bSYour Name  * hif_prealloc_put_multi_pages() - puts back pre-alloc DP multi-pages memory
735*5113495bSYour Name  * @scn: HIF context
736*5113495bSYour Name  * @desc_type: descriptor type
737*5113495bSYour Name  * @pages: multi page information storage
738*5113495bSYour Name  * @cacheable: coherent memory or cacheable memory
739*5113495bSYour Name  *
740*5113495bSYour Name  * Return: None
741*5113495bSYour Name  */
742*5113495bSYour Name void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
743*5113495bSYour Name 				  struct qdf_mem_multi_page_t *pages,
744*5113495bSYour Name 				  bool cacheable);
745*5113495bSYour Name #else
746*5113495bSYour Name static inline
hif_mem_alloc_consistent_unaligned(struct hif_softc * scn,qdf_size_t size,qdf_dma_addr_t * paddr,uint32_t ring_type,uint8_t * is_mem_prealloc)747*5113495bSYour Name void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
748*5113495bSYour Name 					 qdf_size_t size,
749*5113495bSYour Name 					 qdf_dma_addr_t *paddr,
750*5113495bSYour Name 					 uint32_t ring_type,
751*5113495bSYour Name 					 uint8_t *is_mem_prealloc)
752*5113495bSYour Name {
753*5113495bSYour Name 	return qdf_mem_alloc_consistent(scn->qdf_dev,
754*5113495bSYour Name 					scn->qdf_dev->dev,
755*5113495bSYour Name 					size,
756*5113495bSYour Name 					paddr);
757*5113495bSYour Name }
758*5113495bSYour Name 
759*5113495bSYour Name static inline
hif_mem_free_consistent_unaligned(struct hif_softc * scn,qdf_size_t size,void * vaddr,qdf_dma_addr_t paddr,qdf_dma_context_t memctx,uint8_t is_mem_prealloc)760*5113495bSYour Name void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
761*5113495bSYour Name 				       qdf_size_t size,
762*5113495bSYour Name 				       void *vaddr,
763*5113495bSYour Name 				       qdf_dma_addr_t paddr,
764*5113495bSYour Name 				       qdf_dma_context_t memctx,
765*5113495bSYour Name 				       uint8_t is_mem_prealloc)
766*5113495bSYour Name {
767*5113495bSYour Name 	return qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
768*5113495bSYour Name 				       size, vaddr, paddr, memctx);
769*5113495bSYour Name }
770*5113495bSYour Name 
771*5113495bSYour Name static inline
hif_prealloc_get_multi_pages(struct hif_softc * scn,uint32_t desc_type,qdf_size_t elem_size,uint16_t elem_num,struct qdf_mem_multi_page_t * pages,bool cacheable)772*5113495bSYour Name void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
773*5113495bSYour Name 				  qdf_size_t elem_size, uint16_t elem_num,
774*5113495bSYour Name 				  struct qdf_mem_multi_page_t *pages,
775*5113495bSYour Name 				  bool cacheable)
776*5113495bSYour Name {
777*5113495bSYour Name 	qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
778*5113495bSYour Name 				  elem_size, elem_num, 0, cacheable);
779*5113495bSYour Name }
780*5113495bSYour Name 
781*5113495bSYour Name static inline
hif_prealloc_put_multi_pages(struct hif_softc * scn,uint32_t desc_type,struct qdf_mem_multi_page_t * pages,bool cacheable)782*5113495bSYour Name void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
783*5113495bSYour Name 				  struct qdf_mem_multi_page_t *pages,
784*5113495bSYour Name 				  bool cacheable)
785*5113495bSYour Name {
786*5113495bSYour Name 	qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
787*5113495bSYour Name 				 cacheable);
788*5113495bSYour Name }
789*5113495bSYour Name #endif
790*5113495bSYour Name 
791*5113495bSYour Name /**
792*5113495bSYour Name  * hif_get_rx_ctx_id() - Returns NAPI instance ID based on CE ID
793*5113495bSYour Name  * @ctx_id: Rx CE context ID
794*5113495bSYour Name  * @hif_hdl: HIF Context
795*5113495bSYour Name  *
796*5113495bSYour Name  * Return: Rx instance ID
797*5113495bSYour Name  */
798*5113495bSYour Name int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl);
799*5113495bSYour Name void hif_ramdump_handler(struct hif_opaque_softc *scn);
#ifdef HIF_USB
void hif_usb_get_hw_info(struct hif_softc *scn);
void hif_usb_ramdump_handler(struct hif_opaque_softc *scn);
#else
/* no-op stubs when the USB HIF bus is not compiled in */
static inline void hif_usb_get_hw_info(struct hif_softc *scn) {}
static inline void hif_usb_ramdump_handler(struct hif_opaque_softc *scn) {}
#endif
807*5113495bSYour Name 
808*5113495bSYour Name /**
809*5113495bSYour Name  * hif_wake_interrupt_handler() - interrupt handler for standalone wake irq
810*5113495bSYour Name  * @irq: the irq number that fired
811*5113495bSYour Name  * @context: the opaque pointer passed to request_irq()
812*5113495bSYour Name  *
813*5113495bSYour Name  * Return: an irq return type
814*5113495bSYour Name  */
815*5113495bSYour Name irqreturn_t hif_wake_interrupt_handler(int irq, void *context);
816*5113495bSYour Name 
/**
 * hif_is_target_register_access_allowed() - check whether target register
 *		access is currently permitted
 * @hif_sc: HIF context
 *
 * SNOC has a full implementation elsewhere; on IPCI access is denied
 * while recovery is in progress; all other buses always allow access.
 *
 * Return: true if target registers may be accessed
 */
#if defined(HIF_SNOC)
bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc);
#elif defined(HIF_IPCI)
static inline bool
hif_is_target_register_access_allowed(struct hif_softc *hif_sc)
{
	/* the target must not be touched while recovery is underway */
	return !(hif_sc->recovery);
}
#else
static inline
bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc)
{
	return true;
}
#endif
832*5113495bSYour Name 
#ifdef ADRASTEA_RRI_ON_DDR
void hif_uninit_rri_on_ddr(struct hif_softc *scn);
#else
/* no-op stub when the RRI-on-DDR feature is not compiled in */
static inline
void hif_uninit_rri_on_ddr(struct hif_softc *scn) {}
#endif
839*5113495bSYour Name void hif_cleanup_static_buf_to_target(struct hif_softc *scn);
840*5113495bSYour Name 
841*5113495bSYour Name #ifdef FEATURE_RUNTIME_PM
842*5113495bSYour Name /**
843*5113495bSYour Name  * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
844*5113495bSYour Name  * @scn: hif context
845*5113495bSYour Name  * @is_get: prevent linkdown if true otherwise allow
846*5113495bSYour Name  *
847*5113495bSYour Name  * this api should only be called as part of bus prevent linkdown
848*5113495bSYour Name  */
849*5113495bSYour Name void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get);
#else
/* no-op stub when FEATURE_RUNTIME_PM is not compiled in */
static inline
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
}
#endif
856*5113495bSYour Name 
857*5113495bSYour Name #ifdef HIF_HAL_REG_ACCESS_SUPPORT
858*5113495bSYour Name void hif_reg_window_write(struct hif_softc *scn,
859*5113495bSYour Name 			  uint32_t offset, uint32_t value);
860*5113495bSYour Name uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset);
861*5113495bSYour Name #endif
862*5113495bSYour Name 
863*5113495bSYour Name #ifdef FEATURE_HIF_DELAYED_REG_WRITE
864*5113495bSYour Name void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
865*5113495bSYour Name 			   uint32_t val);
866*5113495bSYour Name #endif
867*5113495bSYour Name 
/**
 * hif_is_ep_vote_access_disabled() - check whether both DP and non-DP
 *		endpoint vote accesses are disabled
 * @scn: HIF context
 *
 * Return: true only when both dp_ep_vote_access and ep_vote_access read
 * HIF_EP_VOTE_ACCESS_DISABLE; always false when HIF_IPCI with
 * FEATURE_HAL_DELAYED_REG_WRITE is not compiled in
 */
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
static inline bool hif_is_ep_vote_access_disabled(struct hif_softc *scn)
{
	/* guard-clause form; preserves the original short-circuit order */
	if (qdf_atomic_read(&scn->dp_ep_vote_access) !=
	    HIF_EP_VOTE_ACCESS_DISABLE)
		return false;

	return qdf_atomic_read(&scn->ep_vote_access) ==
	       HIF_EP_VOTE_ACCESS_DISABLE;
}
#else
static inline bool hif_is_ep_vote_access_disabled(struct hif_softc *scn)
{
	return false;
}
#endif
885*5113495bSYour Name #endif /* __HIF_MAIN_H__ */
886