/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "hif_hw_version.h"
#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
     defined(HIF_IPCI))
#include "ce_tasklet.h"
#include "ce_api.h"
#endif
#include "qdf_trace.h"
#include "hif_debug.h"
#include "mp_dev.h"
#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
	defined(QCA_WIFI_QCA5332)
#include "hal_api.h"
#endif
#include "hif_napi.h"
#include "hif_unit_test_suspend_i.h"
#include "qdf_module.h"
#ifdef HIF_CE_LOG_INFO
#include <qdf_notifier.h>
#include <qdf_hang_event_notifier.h>
#endif
#include <linux/cpumask.h>

#include <pld_common.h>
#include "ce_internal.h"
#include <qdf_tracepoint.h>
#include "qdf_ssr_driver_dump.h"

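/**
 * hif_dump() - trigger a bus-specific dump
 * @hif_ctx: hif context
 * @cmd_id: dump command id, forwarded as-is to the bus layer
 * @start: start/stop flag, forwarded as-is to the bus layer
 *
 * Thin wrapper that forwards the request to the bus-specific
 * hif_trigger_dump() implementation.
 */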
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}

/**
 * hif_get_target_id(): get the target id for this HIF context
 * @scn: hif_softc context
 *
 * Return the virtual memory base address of the target to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}

/**
 * hif_get_targetdef(): hif_get_targetdef
 * @hif_ctx: hif context
 *
 * Return: void *
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}

#ifdef FORCE_WAKE
#ifndef QCA_WIFI_WCN6450
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#else
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif

/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up, otherwise you could be
 * negating a vote from another source. When no votes are present
 * hif will not guarantee the linkstate after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in the MC thread
 * and in the initialization/deinitialization sequences. See the usage
 * sketch after hif_vote_link_up() below.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	if (scn->linkstate_vote == 0)
		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
				scn->linkstate_vote);

	scn->linkstate_vote--;
	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}

/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in the MC thread
 * and in the initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote++;
	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}

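/*
 * Usage sketch for the link votes (illustrative only; the real callers
 * live outside this file):
 *
 *	hif_vote_link_up(hif_ctx);	(keep the link up across suspend)
 *	... exchange messages with the firmware ...
 *	hif_vote_link_down(hif_ctx);	(release the matching vote)
 *
 * Votes are reference counted, so every hif_vote_link_up() call must be
 * paired with exactly one hif_vote_link_down() call.
 */
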
/**
 * hif_can_suspend_link(): query if hif is permitted to suspend the link
 * @hif_ctx: hif context
 *
 * Hif will ensure that the link won't be suspended if the upper layers
 * don't want it to.
 *
 * SYNCHRONIZATION: MC thread is stopped before bus suspend, thus
 * we don't need extra locking to ensure votes don't change while
 * we are in the process of suspending or resuming.
 *
 * Return: false if hif will guarantee link up during suspend.
 */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	return scn->linkstate_vote == 0;
}

/**
 * hif_hia_item_address(): get the address of a host interest item
 * @target_type: target_type
 * @item_offset: byte offset of the item within the host interest area
 *
 * Return: absolute target address of the host interest item, or 0 for
 * targets without a host interest area.
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	default:
		ASSERT(0);
		return 0;
	}
}

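/*
 * Illustrative call pattern (a sketch only; struct host_interest_s and its
 * hi_board_data member come from targaddrs.h and are assumed here rather
 * than used elsewhere in this file):
 *
 *	addr = hif_hia_item_address(tgt_info->target_type,
 *				    offsetof(struct host_interest_s,
 *					     hi_board_data));
 *
 * i.e. callers pass the byte offset of a host interest item and get back
 * the absolute address of that item on the target.
 */
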
/**
 * hif_max_num_receives_reached() - check if the max receive count is reached
 * @scn: HIF Context
 * @count: number of receives processed so far
 *
 * Return: true if the receive limit for this pass has been reached
 */
bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
{
	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
		return count > 120;
	else
		return count > MAX_NUM_OF_RECEIVES;
}

/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum buffer count
 *
 * Routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}

/**
 * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
 * @hif_ctx: hif context
 * @htc_htt_tx_endpoint: htt_tx_endpoint
 *
 * Return: void
 */
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is NULL!");
		return;
	}

	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
}
qdf_export_symbol(hif_save_htc_htt_config_endpoint);

static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = MANGO_V1,
		.subid = 0xF,
		.name = "MANGO_V1",
	},
	{
		.id = PEACH_V1,
		.subid = 0,
		.name = "PEACH_V1",
	},
	{
		.id = KIWI_V1,
		.subid = 0,
		.name = "KIWI_V1",
	},
	{
		.id = KIWI_V2,
		.subid = 0,
		.name = "KIWI_V2",
	},
	{
		.id = WCN6750_V1,
		.subid = 0,
		.name = "WCN6750_V1",
	},
	{
		.id = WCN6750_V2,
		.subid = 0,
		.name = "WCN6750_V2",
	},
	{
		.id = WCN6450_V1,
		.subid = 0,
		.name = "WCN6450_V1",
	},
	{
		.id = QCA6490_v2_1,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = QCA6490_v2,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = WCN3990_TALOS,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_MOOREA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_SAIPAN,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_RENNELL,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_BITRA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_DIVAR,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_ATHERTON,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_STRAIT,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_NETRANI,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_CLARENCE,
		.subid = 0,
		.name = "WCN3990",
	}
};

/**
 * hif_get_hw_name(): get a human readable name for the hardware
 * @info: Target Info
 *
 * Return: human readable name for the underlying wifi hardware.
 */
static const char *hif_get_hw_name(struct hif_target_info *info)
{
	int i;

	hif_debug("target version = %d, target revision = %d",
		  info->target_version,
		  info->target_revision);

	if (info->hw_name)
		return info->hw_name;

	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
		if (info->target_version == qwlan_hw_list[i].id &&
		    info->target_revision == qwlan_hw_list[i].subid) {
			return qwlan_hw_list[i].name;
		}
	}

	info->hw_name = qdf_mem_malloc(64);
	if (!info->hw_name)
		return "Unknown Device (nomem)";

	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
			info->target_version);
	if (i < 0)
		return "Unknown Device (snprintf failure)";
	else
		return info->hw_name;
}

/**
 * hif_get_hw_info(): get the hw version, revision and name
 * @scn: hif context
 * @version: buffer to fill with the target version
 * @revision: buffer to fill with the target revision
 * @target_name: buffer to fill with a human readable target name
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
		     const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}

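/*
 * Example call pattern (illustrative only; the local variable names are
 * hypothetical):
 *
 *	u32 version, revision;
 *	const char *hw_name;
 *
 *	hif_get_hw_info(hif_hdl, &version, &revision, &hw_name);
 *	hif_info("wlan: %s (version 0x%x rev %u)", hw_name, version, revision);
 */
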
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: hif handle
 *
 * Return: device base address
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);

/**
 * hif_get_dev_ba_ce(): API to get device CE base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CE
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}

qdf_export_symbol(hif_get_dev_ba_ce);

/**
 * hif_get_dev_ba_pmm(): API to get device PMM base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for PMM
 */
void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_pmm_base;
}

qdf_export_symbol(hif_get_dev_ba_pmm);

uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->target_info.soc_version;
}

qdf_export_symbol(hif_get_soc_version);

/**
 * hif_get_dev_ba_cmem(): API to get device CMEM base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CMEM
 */
void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_cmem;
}

qdf_export_symbol(hif_get_dev_ba_cmem);

#ifdef FEATURE_RUNTIME_PM
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
	if (is_get)
		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
}
#else
static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
}
#endif

#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
					       struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
					       struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */

/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.disable_wake_irq =
			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
		/*
		 * The wake IRQ can't share the same IRQ with the copy
		 * engines. In one MSI mode, we don't know whether the wake
		 * IRQ was triggered or not in the wake IRQ handler
		 * (known issue CR 2055359). To support the wake IRQ,
		 * allocate at least 2 MSI vectors: the first is for the
		 * wake IRQ while the others are shared by the copy engines.
		 */
		if (pld_is_one_msi(scn->qdf_dev->dev)) {
			hif_debug("Disable wake IRQ once it is one MSI mode");
			scn->ini_cfg.disable_wake_irq = true;
		}
		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
	}
}

#if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
/**
 * hif_recovery_notifier_cb - Recovery notifier callback to log
 *  hang event data
 * @block: notifier block
 * @state: state
 * @data: notifier data
 *
 * Return: status
 */
static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
			     void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct hif_softc *hif_handle;
	bool bus_id_invalid;

	if (!data || !block)
		return -EINVAL;

	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	hif_handle = notif_block->priv_data;
	if (!hif_handle)
		return -EINVAL;

	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
					  &notif_data->offset);
	if (bus_id_invalid)
		return NOTIFY_STOP_MASK;

	hif_log_ce_info(hif_handle, notif_data->hang_data,
			&notif_data->offset);

	return 0;
}

/**
 * hif_register_recovery_notifier - Register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier;

	if (!hif_handle)
		return QDF_STATUS_E_FAILURE;

	hif_notifier = &hif_handle->hif_recovery_notifier;

	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
	hif_notifier->priv_data = hif_handle;
	return qdf_hang_event_register_notifier(hif_notifier);
}

/**
 * hif_unregister_recovery_notifier - Un-register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;

	return qdf_hang_event_unregister_notifier(hif_notifier);
}
#else
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif

#if defined(HIF_CPU_PERF_AFFINE_MASK) || \
	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
/**
 * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: HIF context
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void __hif_cpu_hotplug_notify(void *context,
				     uint32_t cpu, bool cpu_up)
{
	struct hif_softc *scn = context;

	if (!scn)
		return;
	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
		return;

	if (cpu_up) {
		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
		hif_debug("Setting affinity for online CPU: %d", cpu);
	} else {
		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
	}
}

/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 * handler
 * @context: HIF context
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}

static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}

static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}

static void hif_cpuhp_register(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot register hotplug notifiers");
		return;
	}
	qdf_cpuhp_register(&scn->cpuhp_event_handle,
			   scn,
			   hif_cpu_online_cb,
			   hif_cpu_before_offline_cb);
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}
	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}

#else
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* HIF_CPU_PERF_AFFINE_MASK || FEATURE_ENABLE_CE_DP_IRQ_AFFINE */

#ifdef HIF_DETECTION_LATENCY_ENABLE
/*
 * Bitmask to control enablement of latency detection for the tasklets;
 * bit X enables detection for the tasklet of WLAN_CE_X.
 */
#ifndef DETECTION_LATENCY_TASKLET_MASK
#define DETECTION_LATENCY_TASKLET_MASK (BIT(2) | BIT(7))
#endif

static inline int
__hif_tasklet_latency(struct hif_softc *scn, bool from_timer, int idx)
{
	qdf_time_t sched_time =
		scn->latency_detect.tasklet_info[idx].sched_time;
	qdf_time_t exec_time =
		scn->latency_detect.tasklet_info[idx].exec_time;
	qdf_time_t curr_time = qdf_system_ticks();
	uint32_t threshold = scn->latency_detect.threshold;
	qdf_time_t expect_exec_time =
		sched_time + qdf_system_msecs_to_ticks(threshold);

	/* Two kinds of check here.
	 * from_timer==true:  check if the tasklet has stalled
	 * from_timer==false: check if the tasklet executed late
	 */
	if (from_timer ?
	    (qdf_system_time_after(sched_time, exec_time) &&
	     qdf_system_time_after(curr_time, expect_exec_time)) :
	    qdf_system_time_after(exec_time, expect_exec_time)) {
		hif_err("tasklet[%d] latency detected: from_timer %d, curr_time %lu, sched_time %lu, exec_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
			idx, from_timer, curr_time, sched_time,
			exec_time, threshold,
			scn->latency_detect.timeout,
			qdf_get_cpu(), (void *)_RET_IP_);
		qdf_trigger_self_recovery(NULL,
					  QDF_TASKLET_CREDIT_LATENCY_DETECT);
		return -ETIMEDOUT;
	}

	return 0;
}

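/*
 * Worked example for the two checks above (illustrative tick values;
 * the threshold is assumed to correspond to 100 ticks):
 *
 * from_timer == true:  sched_time = 1000, exec_time = 900 (scheduled after
 * the last execution but not run yet) and curr_time = 1150 is past
 * sched_time + threshold = 1100, so the tasklet is reported as stalled.
 *
 * from_timer == false: exec_time = 1120 is past sched_time + threshold =
 * 1100, so the tasklet did run, but later than expected.
 */
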
/**
 * hif_tasklet_latency_detect_enabled() - check whether latency detect
 * is enabled for the tasklet which is specified by idx
 * @scn: hif context
 * @idx: CE id
 *
 * Return: true if latency detect is enabled for the specified tasklet,
 * false otherwise.
 */
static inline bool
hif_tasklet_latency_detect_enabled(struct hif_softc *scn, int idx)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return false;

	if (!scn->latency_detect.enable_detection)
		return false;

	if (idx < 0 || idx >= HIF_TASKLET_IN_MONITOR ||
	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
		return false;

	return true;
}

void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
{
	if (!hif_tasklet_latency_detect_enabled(scn, idx))
		return;

	/*
	 * hif_set_enable_detection(true) might come between
	 * hif_tasklet_latency_record_sched() and
	 * hif_tasklet_latency_record_exec() during wlan startup, then the
	 * sched_time is 0 but exec_time is not, and hit the timeout case in
	 * __hif_tasklet_latency().
	 * To avoid such issue, skip exec_time recording if sched_time has not
	 * been recorded.
	 */
	if (!scn->latency_detect.tasklet_info[idx].sched_time)
		return;

	scn->latency_detect.tasklet_info[idx].exec_time = qdf_system_ticks();
	__hif_tasklet_latency(scn, false, idx);
}

void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
{
	if (!hif_tasklet_latency_detect_enabled(scn, idx))
		return;

	scn->latency_detect.tasklet_info[idx].sched_cpuid = qdf_get_cpu();
	scn->latency_detect.tasklet_info[idx].sched_time = qdf_system_ticks();
}

static inline void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time = scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t threshold = scn->latency_detect.threshold;
	int cpu_id = qdf_get_cpu();

	/* Two kinds of check here.
	 * from_timer==true:  check if the credit report has stalled
	 * from_timer==false: check if the credit report came late
	 */

	if ((from_timer ?
	     qdf_system_time_after(credit_request_time, credit_report_time) :
	     qdf_system_time_after(credit_report_time, credit_request_time)) &&
	    qdf_system_time_after(curr_jiffies,
				  credit_request_time +
				  qdf_system_msecs_to_ticks(threshold))) {
		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, threshold,
			scn->latency_detect.timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}

static inline void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	int i, ret;

	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (!qdf_test_bit(i, scn->latency_detect.tasklet_bmap))
			continue;

		ret = __hif_tasklet_latency(scn, from_timer, i);
		if (ret)
			return;
	}
}

/**
 * hif_check_detection_latency(): check for tasklet/credit report latency
 * @scn: hif context
 * @from_timer: true if called from the detection timer handler
 * @bitmap_type: indicates whether to check the tasklets, the credit
 *  report, or both
 *
 * Return: none
 */
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (!scn->latency_detect.enable_detection)
		return;

	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
		hif_tasklet_latency(scn, from_timer);

	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
		hif_credit_latency(scn, from_timer);
}

static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu, i;
	qdf_cpu_mask cpu_mask = {0};
	struct hif_latency_detect *detect = &scn->latency_detect;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/* The timer needs to be restarted on a different cpu so it can
	 * detect a tasklet schedule stall. There is still a chance that,
	 * after the timer has been started, an irq/tasklet lands on the
	 * same cpu; the tasklet then executes before the softirq timer and,
	 * if that tasklet stalls, this timer can't detect it. We accept
	 * this as a limitation: if the tasklet stalls, some other place
	 * will detect it, just a little later.
	 */
	qdf_cpumask_copy(&cpu_mask, (const qdf_cpu_mask *)cpu_active_mask);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (!qdf_test_bit(i, detect->tasklet_bmap))
			continue;

		qdf_cpumask_clear_cpu(detect->tasklet_info[i].sched_cpuid,
				      &cpu_mask);
	}

	next_cpu = cpumask_first(&cpu_mask);
	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* no other cpu is available, restart the timer on the
		 * local cpu
		 */
		qdf_timer_mod(&detect->timer, detect->timeout);
	} else {
		qdf_timer_start_on(&detect->timer, detect->timeout, next_cpu);
	}
}

static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	scn->latency_detect.timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.timeout,
		 scn->latency_detect.threshold);

	scn->latency_detect.is_timer_started = false;

	qdf_timer_init(NULL,
		       &scn->latency_detect.timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.timer);
}

static void hif_latency_detect_init(struct hif_softc *scn)
{
	uint32_t tasklet_mask;
	int i;

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	tasklet_mask = DETECTION_LATENCY_TASKLET_MASK;
	hif_info("tasklet mask is 0x%x", tasklet_mask);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (BIT(i) & tasklet_mask)
			qdf_set_bit(i, scn->latency_detect.tasklet_bmap);
	}

	hif_latency_detect_timer_init(scn);
}

static void hif_latency_detect_deinit(struct hif_softc *scn)
{
	int i;

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_latency_detect_timer_deinit(scn);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++)
		qdf_clear_bit(i, scn->latency_detect.tasklet_bmap);
}

hif_latency_detect_timer_start(struct hif_opaque_softc * hif_ctx)1136*5113495bSYour Name void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
1137*5113495bSYour Name {
1138*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1139*5113495bSYour Name 
1140*5113495bSYour Name 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1141*5113495bSYour Name 		return;
1142*5113495bSYour Name 
1143*5113495bSYour Name 	hif_debug_rl("start timer");
1144*5113495bSYour Name 	if (scn->latency_detect.is_timer_started) {
1145*5113495bSYour Name 		hif_info("timer already started");
1146*5113495bSYour Name 		return;
1147*5113495bSYour Name 	}
1148*5113495bSYour Name 
1149*5113495bSYour Name 	qdf_timer_start(&scn->latency_detect.timer,
1150*5113495bSYour Name 			scn->latency_detect.timeout);
1151*5113495bSYour Name 	scn->latency_detect.is_timer_started = true;
1152*5113495bSYour Name }
1153*5113495bSYour Name 
hif_latency_detect_timer_stop(struct hif_opaque_softc * hif_ctx)1154*5113495bSYour Name void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
1155*5113495bSYour Name {
1156*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1157*5113495bSYour Name 
1158*5113495bSYour Name 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1159*5113495bSYour Name 		return;
1160*5113495bSYour Name 
1161*5113495bSYour Name 	hif_debug_rl("stop timer");
1162*5113495bSYour Name 
1163*5113495bSYour Name 	qdf_timer_sync_cancel(&scn->latency_detect.timer);
1164*5113495bSYour Name 	scn->latency_detect.is_timer_started = false;
1165*5113495bSYour Name }
1166*5113495bSYour Name 
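/**
 * hif_latency_detect_credit_record_time() - record credit exchange timestamps
 * @type: credit exchange type (credit request or credit report processing)
 * @hif_ctx: HIF opaque context
 *
 * Record the time of the credit request or credit report and run the
 * credit latency check. Only applicable in mission mode.
 *
 * Return: None
 */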
hif_latency_detect_credit_record_time(enum hif_credit_exchange_type type,struct hif_opaque_softc * hif_ctx)1167*5113495bSYour Name void hif_latency_detect_credit_record_time(
1168*5113495bSYour Name 	enum hif_credit_exchange_type type,
1169*5113495bSYour Name 	struct hif_opaque_softc *hif_ctx)
1170*5113495bSYour Name {
1171*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1172*5113495bSYour Name 
1173*5113495bSYour Name 	if (!scn) {
1174*5113495bSYour Name 		hif_err("scn is null");
1175*5113495bSYour Name 		return;
1176*5113495bSYour Name 	}
1177*5113495bSYour Name 
1178*5113495bSYour Name 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1179*5113495bSYour Name 		return;
1180*5113495bSYour Name 
1181*5113495bSYour Name 	if (HIF_REQUEST_CREDIT == type)
1182*5113495bSYour Name 		scn->latency_detect.credit_request_time = qdf_system_ticks();
1183*5113495bSYour Name 	else if (HIF_PROCESS_CREDIT_REPORT == type)
1184*5113495bSYour Name 		scn->latency_detect.credit_report_time = qdf_system_ticks();
1185*5113495bSYour Name 
1186*5113495bSYour Name 	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
1187*5113495bSYour Name }
1188*5113495bSYour Name 
hif_set_enable_detection(struct hif_opaque_softc * hif_ctx,bool value)1189*5113495bSYour Name void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
1190*5113495bSYour Name {
1191*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1192*5113495bSYour Name 
1193*5113495bSYour Name 	if (!scn) {
1194*5113495bSYour Name 		hif_err("scn is null");
1195*5113495bSYour Name 		return;
1196*5113495bSYour Name 	}
1197*5113495bSYour Name 
1198*5113495bSYour Name 	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
1199*5113495bSYour Name 		return;
1200*5113495bSYour Name 
1201*5113495bSYour Name 	scn->latency_detect.enable_detection = value;
1202*5113495bSYour Name }
1203*5113495bSYour Name #else
hif_latency_detect_init(struct hif_softc * scn)1204*5113495bSYour Name static inline void hif_latency_detect_init(struct hif_softc *scn)
1205*5113495bSYour Name {}
1206*5113495bSYour Name 
hif_latency_detect_deinit(struct hif_softc * scn)1207*5113495bSYour Name static inline void hif_latency_detect_deinit(struct hif_softc *scn)
1208*5113495bSYour Name {}
1209*5113495bSYour Name #endif
1210*5113495bSYour Name 
1211*5113495bSYour Name #ifdef WLAN_FEATURE_AFFINITY_MGR
1212*5113495bSYour Name #define AFFINITY_THRESHOLD 5000000
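/**
 * hif_affinity_mgr_init() - initialize the IRQ affinity manager
 * @scn: HIF context
 * @psoc: psoc object used to read the ini configuration
 *
 * Affinity management is enabled only when the audio use case ini is set
 * and the platform supports querying the CPUs taken by WALT. When enabled,
 * build the allowed CPU mask from the little-cluster CPUs and set the time
 * threshold.
 *
 * Return: None
 */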
1213*5113495bSYour Name static inline void
hif_affinity_mgr_init(struct hif_softc * scn,struct wlan_objmgr_psoc * psoc)1214*5113495bSYour Name hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1215*5113495bSYour Name {
1216*5113495bSYour Name 	unsigned int cpus;
1217*5113495bSYour Name 	qdf_cpu_mask allowed_mask = {0};
1218*5113495bSYour Name 
1219*5113495bSYour Name 	scn->affinity_mgr_supported =
1220*5113495bSYour Name 		(cfg_get(psoc, CFG_IRQ_AFFINE_AUDIO_USE_CASE) &&
1221*5113495bSYour Name 		qdf_walt_get_cpus_taken_supported());
1222*5113495bSYour Name 
1223*5113495bSYour Name 	hif_info("Affinity Manager supported: %d", scn->affinity_mgr_supported);
1224*5113495bSYour Name 
1225*5113495bSYour Name 	if (!scn->affinity_mgr_supported)
1226*5113495bSYour Name 		return;
1227*5113495bSYour Name 
1228*5113495bSYour Name 	scn->time_threshold = AFFINITY_THRESHOLD;
1229*5113495bSYour Name 	qdf_for_each_possible_cpu(cpus)
1230*5113495bSYour Name 		if (qdf_topology_physical_package_id(cpus) ==
1231*5113495bSYour Name 			CPU_CLUSTER_TYPE_LITTLE)
1232*5113495bSYour Name 			qdf_cpumask_set_cpu(cpus, &allowed_mask);
1233*5113495bSYour Name 	qdf_cpumask_copy(&scn->allowed_mask, &allowed_mask);
1234*5113495bSYour Name }
1235*5113495bSYour Name #else
1236*5113495bSYour Name static inline void
hif_affinity_mgr_init(struct hif_softc * scn,struct wlan_objmgr_psoc * psoc)1237*5113495bSYour Name hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
1238*5113495bSYour Name {
1239*5113495bSYour Name }
1240*5113495bSYour Name #endif
1241*5113495bSYour Name 
1242*5113495bSYour Name #ifdef FEATURE_DIRECT_LINK
1243*5113495bSYour Name /**
1244*5113495bSYour Name  * hif_init_direct_link_rcv_pipe_num(): Initialize the direct link receive
1245*5113495bSYour Name  *  pipe number
1246*5113495bSYour Name  * @scn: hif context
1247*5113495bSYour Name  *
1248*5113495bSYour Name  * Return: None
1249*5113495bSYour Name  */
1250*5113495bSYour Name static inline
hif_init_direct_link_rcv_pipe_num(struct hif_softc * scn)1251*5113495bSYour Name void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1252*5113495bSYour Name {
1253*5113495bSYour Name 	scn->dl_recv_pipe_num = INVALID_PIPE_NO;
1254*5113495bSYour Name }
1255*5113495bSYour Name #else
1256*5113495bSYour Name static inline
hif_init_direct_link_rcv_pipe_num(struct hif_softc * scn)1257*5113495bSYour Name void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
1258*5113495bSYour Name {
1259*5113495bSYour Name }
1260*5113495bSYour Name #endif
1261*5113495bSYour Name 
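/**
 * hif_open() - allocate and initialize a HIF context
 * @qdf_ctx: qdf device context
 * @mode: driver (con) mode
 * @bus_type: underlying bus type
 * @cbk: driver state callbacks copied into the context
 * @psoc: psoc object used to fetch configuration
 *
 * Return: opaque HIF handle on success, NULL on failure
 */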
hif_open(qdf_device_t qdf_ctx,uint32_t mode,enum qdf_bus_type bus_type,struct hif_driver_state_callbacks * cbk,struct wlan_objmgr_psoc * psoc)1262*5113495bSYour Name struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1263*5113495bSYour Name 				  uint32_t mode,
1264*5113495bSYour Name 				  enum qdf_bus_type bus_type,
1265*5113495bSYour Name 				  struct hif_driver_state_callbacks *cbk,
1266*5113495bSYour Name 				  struct wlan_objmgr_psoc *psoc)
1267*5113495bSYour Name {
1268*5113495bSYour Name 	struct hif_softc *scn;
1269*5113495bSYour Name 	QDF_STATUS status = QDF_STATUS_SUCCESS;
1270*5113495bSYour Name 	int bus_context_size = hif_bus_get_context_size(bus_type);
1271*5113495bSYour Name 
1272*5113495bSYour Name 	if (bus_context_size == 0) {
1273*5113495bSYour Name 		hif_err("context size 0 not allowed");
1274*5113495bSYour Name 		return NULL;
1275*5113495bSYour Name 	}
1276*5113495bSYour Name 
1277*5113495bSYour Name 	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
1278*5113495bSYour Name 	if (!scn)
1279*5113495bSYour Name 		return GET_HIF_OPAQUE_HDL(scn);
1280*5113495bSYour Name 
1281*5113495bSYour Name 	scn->qdf_dev = qdf_ctx;
1282*5113495bSYour Name 	scn->hif_con_param = mode;
1283*5113495bSYour Name 	qdf_atomic_init(&scn->active_tasklet_cnt);
1284*5113495bSYour Name 	qdf_atomic_init(&scn->active_oom_work_cnt);
1285*5113495bSYour Name 
1286*5113495bSYour Name 	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
1287*5113495bSYour Name 	qdf_atomic_init(&scn->link_suspended);
1288*5113495bSYour Name 	qdf_atomic_init(&scn->tasklet_from_intr);
1289*5113495bSYour Name 	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
1290*5113495bSYour Name 	qdf_mem_copy(&scn->callbacks, cbk,
1291*5113495bSYour Name 		     sizeof(struct hif_driver_state_callbacks));
1292*5113495bSYour Name 	scn->bus_type  = bus_type;
1293*5113495bSYour Name 
1294*5113495bSYour Name 	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
1295*5113495bSYour Name 	hif_get_cfg_from_psoc(scn, psoc);
1296*5113495bSYour Name 
1297*5113495bSYour Name 	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
1298*5113495bSYour Name 	status = hif_bus_open(scn, bus_type);
1299*5113495bSYour Name 	if (status != QDF_STATUS_SUCCESS) {
1300*5113495bSYour Name 		hif_err("hif_bus_open error = %d, bus_type = %d",
1301*5113495bSYour Name 			status, bus_type);
1302*5113495bSYour Name 		qdf_mem_free(scn);
1303*5113495bSYour Name 		scn = NULL;
1304*5113495bSYour Name 		goto out;
1305*5113495bSYour Name 	}
1306*5113495bSYour Name 
1307*5113495bSYour Name 	hif_rtpm_lock_init(scn);
1308*5113495bSYour Name 
1309*5113495bSYour Name 	hif_cpuhp_register(scn);
1310*5113495bSYour Name 	hif_latency_detect_init(scn);
1311*5113495bSYour Name 	hif_affinity_mgr_init(scn, psoc);
1312*5113495bSYour Name 	hif_init_direct_link_rcv_pipe_num(scn);
1313*5113495bSYour Name 	hif_ce_desc_history_log_register(scn);
1314*5113495bSYour Name 	hif_desc_history_log_register();
1315*5113495bSYour Name 	qdf_ssr_driver_dump_register_region("hif", scn, sizeof(*scn));
1316*5113495bSYour Name 
1317*5113495bSYour Name out:
1318*5113495bSYour Name 	return GET_HIF_OPAQUE_HDL(scn);
1319*5113495bSYour Name }
1320*5113495bSYour Name 
1321*5113495bSYour Name #ifdef ADRASTEA_RRI_ON_DDR
1322*5113495bSYour Name /**
1323*5113495bSYour Name  * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
1324*5113495bSYour Name  * @scn: hif context
1325*5113495bSYour Name  *
1326*5113495bSYour Name  * Return: none
1327*5113495bSYour Name  */
hif_uninit_rri_on_ddr(struct hif_softc * scn)1328*5113495bSYour Name void hif_uninit_rri_on_ddr(struct hif_softc *scn)
1329*5113495bSYour Name {
1330*5113495bSYour Name 	if (scn->vaddr_rri_on_ddr)
1331*5113495bSYour Name 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
1332*5113495bSYour Name 					RRI_ON_DDR_MEM_SIZE,
1333*5113495bSYour Name 					scn->vaddr_rri_on_ddr,
1334*5113495bSYour Name 					scn->paddr_rri_on_ddr, 0);
1335*5113495bSYour Name 	scn->vaddr_rri_on_ddr = NULL;
1336*5113495bSYour Name }
1337*5113495bSYour Name #endif
1338*5113495bSYour Name 
1339*5113495bSYour Name /**
1340*5113495bSYour Name  * hif_close(): hif_close
1341*5113495bSYour Name  * @hif_ctx: hif_ctx
1342*5113495bSYour Name  *
1343*5113495bSYour Name  * Return: n/a
1344*5113495bSYour Name  */
hif_close(struct hif_opaque_softc * hif_ctx)1345*5113495bSYour Name void hif_close(struct hif_opaque_softc *hif_ctx)
1346*5113495bSYour Name {
1347*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1348*5113495bSYour Name 
1349*5113495bSYour Name 	if (!scn) {
1350*5113495bSYour Name 		hif_err("hif_opaque_softc is NULL");
1351*5113495bSYour Name 		return;
1352*5113495bSYour Name 	}
1353*5113495bSYour Name 
1354*5113495bSYour Name 	qdf_ssr_driver_dump_unregister_region("hif");
1355*5113495bSYour Name 	hif_desc_history_log_unregister();
1356*5113495bSYour Name 	hif_ce_desc_history_log_unregister();
1357*5113495bSYour Name 	hif_latency_detect_deinit(scn);
1358*5113495bSYour Name 
1359*5113495bSYour Name 	if (scn->athdiag_procfs_inited) {
1360*5113495bSYour Name 		athdiag_procfs_remove();
1361*5113495bSYour Name 		scn->athdiag_procfs_inited = false;
1362*5113495bSYour Name 	}
1363*5113495bSYour Name 
1364*5113495bSYour Name 	if (scn->target_info.hw_name) {
1365*5113495bSYour Name 		char *hw_name = scn->target_info.hw_name;
1366*5113495bSYour Name 
1367*5113495bSYour Name 		scn->target_info.hw_name = "ErrUnloading";
1368*5113495bSYour Name 		qdf_mem_free(hw_name);
1369*5113495bSYour Name 	}
1370*5113495bSYour Name 
1371*5113495bSYour Name 	hif_uninit_rri_on_ddr(scn);
1372*5113495bSYour Name 	hif_cleanup_static_buf_to_target(scn);
1373*5113495bSYour Name 	hif_cpuhp_unregister(scn);
1374*5113495bSYour Name 	hif_rtpm_lock_deinit(scn);
1375*5113495bSYour Name 
1376*5113495bSYour Name 	hif_bus_close(scn);
1377*5113495bSYour Name 
1378*5113495bSYour Name 	qdf_mem_free(scn);
1379*5113495bSYour Name }
1380*5113495bSYour Name 
1381*5113495bSYour Name /**
1382*5113495bSYour Name  * hif_get_num_active_grp_tasklets() - get the number of active
1383*5113495bSYour Name  *		datapath group tasklets pending to be completed.
1384*5113495bSYour Name  * @scn: HIF context
1385*5113495bSYour Name  *
1386*5113495bSYour Name  * Returns: the number of datapath group tasklets which are active
1387*5113495bSYour Name  */
hif_get_num_active_grp_tasklets(struct hif_softc * scn)1388*5113495bSYour Name static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
1389*5113495bSYour Name {
1390*5113495bSYour Name 	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
1391*5113495bSYour Name }
1392*5113495bSYour Name 
1393*5113495bSYour Name #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1394*5113495bSYour Name 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1395*5113495bSYour Name 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1396*5113495bSYour Name 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1397*5113495bSYour Name 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1398*5113495bSYour Name 	defined(QCA_WIFI_QCN6432) || \
1399*5113495bSYour Name 	defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
1400*5113495bSYour Name /**
1401*5113495bSYour Name  * hif_get_num_pending_work() - get the number of entries in
1402*5113495bSYour Name  *		the workqueue pending to be completed.
1403*5113495bSYour Name  * @scn: HIF context
1404*5113495bSYour Name  *
1405*5113495bSYour Name  * Returns: the number of pending delayed register write work entries
1406*5113495bSYour Name  */
hif_get_num_pending_work(struct hif_softc * scn)1407*5113495bSYour Name static inline int hif_get_num_pending_work(struct hif_softc *scn)
1408*5113495bSYour Name {
1409*5113495bSYour Name 	return hal_get_reg_write_pending_work(scn->hal_soc);
1410*5113495bSYour Name }
1411*5113495bSYour Name #elif defined(FEATURE_HIF_DELAYED_REG_WRITE)
hif_get_num_pending_work(struct hif_softc * scn)1412*5113495bSYour Name static inline int hif_get_num_pending_work(struct hif_softc *scn)
1413*5113495bSYour Name {
1414*5113495bSYour Name 	return qdf_atomic_read(&scn->active_work_cnt);
1415*5113495bSYour Name }
1416*5113495bSYour Name #else
1417*5113495bSYour Name 
hif_get_num_pending_work(struct hif_softc * scn)1418*5113495bSYour Name static inline int hif_get_num_pending_work(struct hif_softc *scn)
1419*5113495bSYour Name {
1420*5113495bSYour Name 	return 0;
1421*5113495bSYour Name }
1422*5113495bSYour Name #endif
1423*5113495bSYour Name 
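/**
 * hif_try_complete_tasks() - wait for pending tasklets and work to drain
 * @scn: HIF context
 *
 * Poll every 10 ms until all active tasklets, group tasklets, pending
 * register write work and OOM work have completed, giving up after
 * HIF_TASK_DRAIN_WAIT_CNT iterations.
 *
 * Return: QDF_STATUS_SUCCESS once drained, QDF_STATUS_E_FAULT on timeout
 */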
hif_try_complete_tasks(struct hif_softc * scn)1424*5113495bSYour Name QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
1425*5113495bSYour Name {
1426*5113495bSYour Name 	uint32_t task_drain_wait_cnt = 0;
1427*5113495bSYour Name 	int tasklet = 0, grp_tasklet = 0, work = 0, oom_work = 0;
1428*5113495bSYour Name 
1429*5113495bSYour Name 	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
1430*5113495bSYour Name 	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1431*5113495bSYour Name 	       (work = hif_get_num_pending_work(scn)) ||
1432*5113495bSYour Name 		(oom_work = hif_get_num_active_oom_work(scn))) {
1433*5113495bSYour Name 		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1434*5113495bSYour Name 			hif_err("pending tasklets %d grp tasklets %d work %d oom work %d",
1435*5113495bSYour Name 				tasklet, grp_tasklet, work, oom_work);
1436*5113495bSYour Name 			/*
1437*5113495bSYour Name 			 * There is a chance of the OOM thread getting
1438*5113495bSYour Name 			 * scheduled continuously or its execution getting
1439*5113495bSYour Name 			 * delayed in a low-memory state. So avoid the panic
1440*5113495bSYour Name 			 * and just prevent suspend if the OOM thread is
1441*5113495bSYour Name 			 * unable to complete its pending work.
1442*5113495bSYour Name 			 */
1443*5113495bSYour Name 			if (oom_work)
1444*5113495bSYour Name 				hif_err("OOM thread is still pending %d tasklets %d grp tasklets %d work %d",
1445*5113495bSYour Name 					oom_work, tasklet, grp_tasklet, work);
1446*5113495bSYour Name 			else
1447*5113495bSYour Name 				QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: tasklets %d grp tasklets %d work %d oom_work %d",
1448*5113495bSYour Name 						HIF_TASK_DRAIN_WAIT_CNT * 10,
1449*5113495bSYour Name 						tasklet, grp_tasklet, work,
1450*5113495bSYour Name 						oom_work);
1451*5113495bSYour Name 			return QDF_STATUS_E_FAULT;
1452*5113495bSYour Name 		}
1453*5113495bSYour Name 		hif_info("waiting for tasklets %d grp tasklets %d work %d oom_work %d",
1454*5113495bSYour Name 			 tasklet, grp_tasklet, work, oom_work);
1455*5113495bSYour Name 		msleep(10);
1456*5113495bSYour Name 	}
1457*5113495bSYour Name 
1458*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1459*5113495bSYour Name }
1460*5113495bSYour Name 
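/**
 * hif_try_complete_dp_tasks() - wait for datapath group tasklets and work
 * @hif_ctx: HIF opaque context
 *
 * Poll every 10 ms until the datapath group tasklets and pending register
 * write work have drained, giving up after HIF_TASK_DRAIN_WAIT_CNT
 * iterations.
 *
 * Return: QDF_STATUS_SUCCESS once drained, QDF_STATUS_E_FAULT on timeout
 */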
hif_try_complete_dp_tasks(struct hif_opaque_softc * hif_ctx)1461*5113495bSYour Name QDF_STATUS hif_try_complete_dp_tasks(struct hif_opaque_softc *hif_ctx)
1462*5113495bSYour Name {
1463*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1464*5113495bSYour Name 	uint32_t task_drain_wait_cnt = 0;
1465*5113495bSYour Name 	int grp_tasklet = 0, work = 0;
1466*5113495bSYour Name 
1467*5113495bSYour Name 	while ((grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
1468*5113495bSYour Name 	       (work = hif_get_num_pending_work(scn))) {
1469*5113495bSYour Name 		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
1470*5113495bSYour Name 			hif_err("pending grp tasklets %d work %d",
1471*5113495bSYour Name 				grp_tasklet, work);
1472*5113495bSYour Name 			QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: grp tasklets %d work %d",
1473*5113495bSYour Name 					HIF_TASK_DRAIN_WAIT_CNT * 10,
1474*5113495bSYour Name 					grp_tasklet, work);
1475*5113495bSYour Name 			return QDF_STATUS_E_FAULT;
1476*5113495bSYour Name 		}
1477*5113495bSYour Name 		hif_info("waiting for grp tasklets %d work %d",
1478*5113495bSYour Name 			 grp_tasklet, work);
1479*5113495bSYour Name 		msleep(10);
1480*5113495bSYour Name 	}
1481*5113495bSYour Name 
1482*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1483*5113495bSYour Name }
1484*5113495bSYour Name 
1485*5113495bSYour Name #ifdef HIF_HAL_REG_ACCESS_SUPPORT
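/**
 * hif_reg_window_write() - write a register value through the HAL window
 * @scn: HIF context
 * @offset: register offset
 * @value: value to write
 *
 * Return: None
 */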
hif_reg_window_write(struct hif_softc * scn,uint32_t offset,uint32_t value)1486*5113495bSYour Name void hif_reg_window_write(struct hif_softc *scn, uint32_t offset,
1487*5113495bSYour Name 			  uint32_t value)
1488*5113495bSYour Name {
1489*5113495bSYour Name 	hal_write32_mb(scn->hal_soc, offset, value);
1490*5113495bSYour Name }
1491*5113495bSYour Name 
hif_reg_window_read(struct hif_softc * scn,uint32_t offset)1492*5113495bSYour Name uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset)
1493*5113495bSYour Name {
1494*5113495bSYour Name 	return hal_read32_mb(scn->hal_soc, offset);
1495*5113495bSYour Name }
1496*5113495bSYour Name #endif
1497*5113495bSYour Name 
1498*5113495bSYour Name #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
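/**
 * hif_try_prevent_ep_vote_access() - disable EP vote access and drain work
 * @hif_ctx: HIF opaque context
 *
 * Disable DP and non-DP endpoint vote access, wait for any pending delayed
 * register write work to drain, and then wait for the PCI endpoint to go
 * to sleep (if the platform supports querying the EP awake state).
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on timeout
 */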
hif_try_prevent_ep_vote_access(struct hif_opaque_softc * hif_ctx)1499*5113495bSYour Name QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1500*5113495bSYour Name {
1501*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1502*5113495bSYour Name 	uint32_t work_drain_wait_cnt = 0;
1503*5113495bSYour Name 	uint32_t wait_cnt = 0;
1504*5113495bSYour Name 	int work = 0;
1505*5113495bSYour Name 
1506*5113495bSYour Name 	qdf_atomic_set(&scn->dp_ep_vote_access,
1507*5113495bSYour Name 		       HIF_EP_VOTE_ACCESS_DISABLE);
1508*5113495bSYour Name 	qdf_atomic_set(&scn->ep_vote_access,
1509*5113495bSYour Name 		       HIF_EP_VOTE_ACCESS_DISABLE);
1510*5113495bSYour Name 
1511*5113495bSYour Name 	while ((work = hif_get_num_pending_work(scn))) {
1512*5113495bSYour Name 		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
1513*5113495bSYour Name 			qdf_atomic_set(&scn->dp_ep_vote_access,
1514*5113495bSYour Name 				       HIF_EP_VOTE_ACCESS_ENABLE);
1515*5113495bSYour Name 			qdf_atomic_set(&scn->ep_vote_access,
1516*5113495bSYour Name 				       HIF_EP_VOTE_ACCESS_ENABLE);
1517*5113495bSYour Name 			hif_err("timed out waiting for pending work %d", work);
1518*5113495bSYour Name 			return QDF_STATUS_E_FAULT;
1519*5113495bSYour Name 		}
1520*5113495bSYour Name 		qdf_sleep(10);
1521*5113495bSYour Name 	}
1522*5113495bSYour Name 
1523*5113495bSYour Name 	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
1524*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
1525*5113495bSYour Name 
1526*5113495bSYour Name 	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
1527*5113495bSYour Name 		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
1528*5113495bSYour Name 			hif_err("EP vote release not processed by FW");
1529*5113495bSYour Name 			return QDF_STATUS_E_FAULT;
1530*5113495bSYour Name 		}
1531*5113495bSYour Name 		qdf_sleep(5);
1532*5113495bSYour Name 	}
1533*5113495bSYour Name 
1534*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1535*5113495bSYour Name }
1536*5113495bSYour Name 
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc * hif_ctx)1537*5113495bSYour Name void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
1538*5113495bSYour Name {
1539*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1540*5113495bSYour Name 	uint8_t vote_access;
1541*5113495bSYour Name 
1542*5113495bSYour Name 	vote_access = qdf_atomic_read(&scn->ep_vote_access);
1543*5113495bSYour Name 
1544*5113495bSYour Name 	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
1545*5113495bSYour Name 		hif_info("EP vote changed from %u to intermediate state",
1546*5113495bSYour Name 			 vote_access);
1547*5113495bSYour Name 
1548*5113495bSYour Name 	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
1549*5113495bSYour Name 		QDF_BUG(0);
1550*5113495bSYour Name 
1551*5113495bSYour Name 	qdf_atomic_set(&scn->ep_vote_access,
1552*5113495bSYour Name 		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
1553*5113495bSYour Name }
1554*5113495bSYour Name 
hif_allow_ep_vote_access(struct hif_opaque_softc * hif_ctx)1555*5113495bSYour Name void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
1556*5113495bSYour Name {
1557*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1558*5113495bSYour Name 
1559*5113495bSYour Name 	qdf_atomic_set(&scn->dp_ep_vote_access,
1560*5113495bSYour Name 		       HIF_EP_VOTE_ACCESS_ENABLE);
1561*5113495bSYour Name 	qdf_atomic_set(&scn->ep_vote_access,
1562*5113495bSYour Name 		       HIF_EP_VOTE_ACCESS_ENABLE);
1563*5113495bSYour Name }
1564*5113495bSYour Name 
hif_set_ep_vote_access(struct hif_opaque_softc * hif_ctx,uint8_t type,uint8_t access)1565*5113495bSYour Name void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1566*5113495bSYour Name 			    uint8_t type, uint8_t access)
1567*5113495bSYour Name {
1568*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1569*5113495bSYour Name 
1570*5113495bSYour Name 	if (type == HIF_EP_VOTE_DP_ACCESS)
1571*5113495bSYour Name 		qdf_atomic_set(&scn->dp_ep_vote_access, access);
1572*5113495bSYour Name 	else
1573*5113495bSYour Name 		qdf_atomic_set(&scn->ep_vote_access, access);
1574*5113495bSYour Name }
1575*5113495bSYour Name 
hif_get_ep_vote_access(struct hif_opaque_softc * hif_ctx,uint8_t type)1576*5113495bSYour Name uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
1577*5113495bSYour Name 			       uint8_t type)
1578*5113495bSYour Name {
1579*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1580*5113495bSYour Name 
1581*5113495bSYour Name 	if (type == HIF_EP_VOTE_DP_ACCESS)
1582*5113495bSYour Name 		return qdf_atomic_read(&scn->dp_ep_vote_access);
1583*5113495bSYour Name 	else
1584*5113495bSYour Name 		return qdf_atomic_read(&scn->ep_vote_access);
1585*5113495bSYour Name }
1586*5113495bSYour Name #endif
1587*5113495bSYour Name 
1588*5113495bSYour Name #ifdef FEATURE_HIF_DELAYED_REG_WRITE
1589*5113495bSYour Name #ifdef MEMORY_DEBUG
1590*5113495bSYour Name #define HIF_REG_WRITE_QUEUE_LEN 128
1591*5113495bSYour Name #else
1592*5113495bSYour Name #define HIF_REG_WRITE_QUEUE_LEN 32
1593*5113495bSYour Name #endif
1594*5113495bSYour Name 
1595*5113495bSYour Name /**
1596*5113495bSYour Name  * hif_print_reg_write_stats() - Print hif delayed reg write stats
1597*5113495bSYour Name  * @hif_ctx: hif opaque handle
1598*5113495bSYour Name  *
1599*5113495bSYour Name  * Return: None
1600*5113495bSYour Name  */
hif_print_reg_write_stats(struct hif_opaque_softc * hif_ctx)1601*5113495bSYour Name void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
1602*5113495bSYour Name {
1603*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1604*5113495bSYour Name 	struct CE_state *ce_state;
1605*5113495bSYour Name 	uint32_t *hist;
1606*5113495bSYour Name 	int i;
1607*5113495bSYour Name 
1608*5113495bSYour Name 	hist = scn->wstats.sched_delay;
1609*5113495bSYour Name 	hif_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
1610*5113495bSYour Name 		  qdf_atomic_read(&scn->wstats.enqueues),
1611*5113495bSYour Name 		  scn->wstats.dequeues,
1612*5113495bSYour Name 		  qdf_atomic_read(&scn->wstats.coalesces),
1613*5113495bSYour Name 		  qdf_atomic_read(&scn->wstats.direct),
1614*5113495bSYour Name 		  qdf_atomic_read(&scn->wstats.q_depth),
1615*5113495bSYour Name 		  scn->wstats.max_q_depth,
1616*5113495bSYour Name 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us],
1617*5113495bSYour Name 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us],
1618*5113495bSYour Name 		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us],
1619*5113495bSYour Name 		  hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]);
1620*5113495bSYour Name 
1621*5113495bSYour Name 	for (i = 0; i < scn->ce_count; i++) {
1622*5113495bSYour Name 		ce_state = scn->ce_id_to_state[i];
1623*5113495bSYour Name 		if (!ce_state)
1624*5113495bSYour Name 			continue;
1625*5113495bSYour Name 
1626*5113495bSYour Name 		hif_debug("ce%d: enq %u deq %u coal %u direct %u",
1627*5113495bSYour Name 			  i, ce_state->wstats.enqueues,
1628*5113495bSYour Name 			  ce_state->wstats.dequeues,
1629*5113495bSYour Name 			  ce_state->wstats.coalesces,
1630*5113495bSYour Name 			  ce_state->wstats.direct);
1631*5113495bSYour Name 	}
1632*5113495bSYour Name }
1633*5113495bSYour Name 
1634*5113495bSYour Name /**
1635*5113495bSYour Name  * hif_is_reg_write_tput_level_high() - throughput level for delayed reg writes
1636*5113495bSYour Name  * @scn: hif_softc pointer
1637*5113495bSYour Name  *
1638*5113495bSYour Name  * Return: true if throughput is high, else false.
1639*5113495bSYour Name  */
hif_is_reg_write_tput_level_high(struct hif_softc * scn)1640*5113495bSYour Name static inline bool hif_is_reg_write_tput_level_high(struct hif_softc *scn)
1641*5113495bSYour Name {
1642*5113495bSYour Name 	int bw_level = hif_get_bandwidth_level(GET_HIF_OPAQUE_HDL(scn));
1643*5113495bSYour Name 
1644*5113495bSYour Name 	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
1645*5113495bSYour Name }
1646*5113495bSYour Name 
1647*5113495bSYour Name /**
1648*5113495bSYour Name  * hif_reg_write_fill_sched_delay_hist() - fill reg write delay histogram
1649*5113495bSYour Name  * @scn: hif_softc pointer
1650*5113495bSYour Name  * @delay_us: delay in us
1651*5113495bSYour Name  *
1652*5113495bSYour Name  * Return: None
1653*5113495bSYour Name  */
hif_reg_write_fill_sched_delay_hist(struct hif_softc * scn,uint64_t delay_us)1654*5113495bSYour Name static inline void hif_reg_write_fill_sched_delay_hist(struct hif_softc *scn,
1655*5113495bSYour Name 						       uint64_t delay_us)
1656*5113495bSYour Name {
1657*5113495bSYour Name 	uint32_t *hist;
1658*5113495bSYour Name 
1659*5113495bSYour Name 	hist = scn->wstats.sched_delay;
1660*5113495bSYour Name 
1661*5113495bSYour Name 	if (delay_us < 100)
1662*5113495bSYour Name 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us]++;
1663*5113495bSYour Name 	else if (delay_us < 1000)
1664*5113495bSYour Name 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us]++;
1665*5113495bSYour Name 	else if (delay_us < 5000)
1666*5113495bSYour Name 		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us]++;
1667*5113495bSYour Name 	else
1668*5113495bSYour Name 		hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]++;
1669*5113495bSYour Name }
1670*5113495bSYour Name 
1671*5113495bSYour Name /**
1672*5113495bSYour Name  * hif_process_reg_write_q_elem() - process a register write queue element
1673*5113495bSYour Name  * @scn: hif_softc pointer
1674*5113495bSYour Name  * @q_elem: pointer to hal register write queue element
1675*5113495bSYour Name  *
1676*5113495bSYour Name  * Return: The value which was written to the address
1677*5113495bSYour Name  */
1678*5113495bSYour Name static int32_t
hif_process_reg_write_q_elem(struct hif_softc * scn,struct hif_reg_write_q_elem * q_elem)1679*5113495bSYour Name hif_process_reg_write_q_elem(struct hif_softc *scn,
1680*5113495bSYour Name 			     struct hif_reg_write_q_elem *q_elem)
1681*5113495bSYour Name {
1682*5113495bSYour Name 	struct CE_state *ce_state = q_elem->ce_state;
1683*5113495bSYour Name 	uint32_t write_val = -1;
1684*5113495bSYour Name 
1685*5113495bSYour Name 	qdf_spin_lock_bh(&ce_state->ce_index_lock);
1686*5113495bSYour Name 
1687*5113495bSYour Name 	ce_state->reg_write_in_progress = false;
1688*5113495bSYour Name 	ce_state->wstats.dequeues++;
1689*5113495bSYour Name 
1690*5113495bSYour Name 	if (ce_state->src_ring) {
1691*5113495bSYour Name 		q_elem->dequeue_val = ce_state->src_ring->write_index;
1692*5113495bSYour Name 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1693*5113495bSYour Name 			       ce_state->src_ring->write_index);
1694*5113495bSYour Name 		write_val = ce_state->src_ring->write_index;
1695*5113495bSYour Name 	} else if (ce_state->dest_ring) {
1696*5113495bSYour Name 		q_elem->dequeue_val = ce_state->dest_ring->write_index;
1697*5113495bSYour Name 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
1698*5113495bSYour Name 			       ce_state->dest_ring->write_index);
1699*5113495bSYour Name 		write_val = ce_state->dest_ring->write_index;
1700*5113495bSYour Name 	} else {
1701*5113495bSYour Name 		hif_debug("invalid reg write received");
1702*5113495bSYour Name 		qdf_assert(0);
1703*5113495bSYour Name 	}
1704*5113495bSYour Name 
1705*5113495bSYour Name 	q_elem->valid = 0;
1706*5113495bSYour Name 	ce_state->last_dequeue_time = q_elem->dequeue_time;
1707*5113495bSYour Name 
1708*5113495bSYour Name 	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
1709*5113495bSYour Name 
1710*5113495bSYour Name 	return write_val;
1711*5113495bSYour Name }
1712*5113495bSYour Name 
1713*5113495bSYour Name /**
1714*5113495bSYour Name  * hif_reg_write_work() - Worker to process delayed writes
1715*5113495bSYour Name  * @arg: hif_softc pointer
1716*5113495bSYour Name  *
1717*5113495bSYour Name  * Return: None
1718*5113495bSYour Name  */
hif_reg_write_work(void * arg)1719*5113495bSYour Name static void hif_reg_write_work(void *arg)
1720*5113495bSYour Name {
1721*5113495bSYour Name 	struct hif_softc *scn = arg;
1722*5113495bSYour Name 	struct hif_reg_write_q_elem *q_elem;
1723*5113495bSYour Name 	uint32_t offset;
1724*5113495bSYour Name 	uint64_t delta_us;
1725*5113495bSYour Name 	int32_t q_depth, write_val;
1726*5113495bSYour Name 	uint32_t num_processed = 0;
1727*5113495bSYour Name 	int32_t ring_id;
1728*5113495bSYour Name 
1729*5113495bSYour Name 	q_elem = &scn->reg_write_queue[scn->read_idx];
1730*5113495bSYour Name 	q_elem->work_scheduled_time = qdf_get_log_timestamp();
1731*5113495bSYour Name 	q_elem->cpu_id = qdf_get_cpu();
1732*5113495bSYour Name 
1733*5113495bSYour Name 	/* Make sure q_elem is consistent in memory across cores */
1734*5113495bSYour Name 	qdf_rmb();
1735*5113495bSYour Name 	if (!q_elem->valid)
1736*5113495bSYour Name 		return;
1737*5113495bSYour Name 
1738*5113495bSYour Name 	q_depth = qdf_atomic_read(&scn->wstats.q_depth);
1739*5113495bSYour Name 	if (q_depth > scn->wstats.max_q_depth)
1740*5113495bSYour Name 		scn->wstats.max_q_depth =  q_depth;
1741*5113495bSYour Name 
1742*5113495bSYour Name 	if (hif_prevent_link_low_power_states(GET_HIF_OPAQUE_HDL(scn))) {
1743*5113495bSYour Name 		scn->wstats.prevent_l1_fails++;
1744*5113495bSYour Name 		return;
1745*5113495bSYour Name 	}
1746*5113495bSYour Name 
1747*5113495bSYour Name 	while (true) {
1748*5113495bSYour Name 		qdf_rmb();
1749*5113495bSYour Name 		if (!q_elem->valid)
1750*5113495bSYour Name 			break;
1751*5113495bSYour Name 
1752*5113495bSYour Name 		qdf_rmb();
1753*5113495bSYour Name 		q_elem->dequeue_time = qdf_get_log_timestamp();
1754*5113495bSYour Name 		ring_id = q_elem->ce_state->id;
1755*5113495bSYour Name 		offset = q_elem->offset;
1756*5113495bSYour Name 		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
1757*5113495bSYour Name 						      q_elem->enqueue_time);
1758*5113495bSYour Name 		hif_reg_write_fill_sched_delay_hist(scn, delta_us);
1759*5113495bSYour Name 
1760*5113495bSYour Name 		scn->wstats.dequeues++;
1761*5113495bSYour Name 		qdf_atomic_dec(&scn->wstats.q_depth);
1762*5113495bSYour Name 
1763*5113495bSYour Name 		write_val = hif_process_reg_write_q_elem(scn, q_elem);
1764*5113495bSYour Name 		hif_debug("read_idx %u ce_id %d offset 0x%x dequeue_val %d",
1765*5113495bSYour Name 			  scn->read_idx, ring_id, offset, write_val);
1766*5113495bSYour Name 
1767*5113495bSYour Name 		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
1768*5113495bSYour Name 					   q_elem->dequeue_val,
1769*5113495bSYour Name 					   q_elem->enqueue_time,
1770*5113495bSYour Name 					   q_elem->dequeue_time);
1771*5113495bSYour Name 		num_processed++;
1772*5113495bSYour Name 		scn->read_idx = (scn->read_idx + 1) &
1773*5113495bSYour Name 					(HIF_REG_WRITE_QUEUE_LEN - 1);
1774*5113495bSYour Name 		q_elem = &scn->reg_write_queue[scn->read_idx];
1775*5113495bSYour Name 	}
1776*5113495bSYour Name 
1777*5113495bSYour Name 	hif_allow_link_low_power_states(GET_HIF_OPAQUE_HDL(scn));
1778*5113495bSYour Name 
1779*5113495bSYour Name 	/*
1780*5113495bSYour Name 	 * Decrement active_work_cnt by the number of elements dequeued after
1781*5113495bSYour Name 	 * hif_allow_link_low_power_states.
1782*5113495bSYour Name 	 * This makes sure that hif_try_complete_tasks will wait till we make
1783*5113495bSYour Name 	 * the bus access in hif_allow_link_low_power_states. This will avoid
1784*5113495bSYour Name 	 * race condition between delayed register worker and bus suspend
1785*5113495bSYour Name 	 * (system suspend or runtime suspend).
1786*5113495bSYour Name 	 *
1787*5113495bSYour Name 	 * The following decrement should be done at the end!
1788*5113495bSYour Name 	 */
1789*5113495bSYour Name 	qdf_atomic_sub(num_processed, &scn->active_work_cnt);
1790*5113495bSYour Name }
1791*5113495bSYour Name 
1792*5113495bSYour Name /**
1793*5113495bSYour Name  * hif_delayed_reg_write_deinit() - De-Initialize delayed reg write processing
1794*5113495bSYour Name  * @scn: hif_softc pointer
1795*5113495bSYour Name  *
1796*5113495bSYour Name  * De-initialize main data structures to process register writes in a delayed
1797*5113495bSYour Name  * workqueue.
1798*5113495bSYour Name  *
1799*5113495bSYour Name  * Return: None
1800*5113495bSYour Name  */
hif_delayed_reg_write_deinit(struct hif_softc * scn)1801*5113495bSYour Name static void hif_delayed_reg_write_deinit(struct hif_softc *scn)
1802*5113495bSYour Name {
1803*5113495bSYour Name 	qdf_flush_work(&scn->reg_write_work);
1804*5113495bSYour Name 	qdf_disable_work(&scn->reg_write_work);
1805*5113495bSYour Name 	qdf_flush_workqueue(0, scn->reg_write_wq);
1806*5113495bSYour Name 	qdf_destroy_workqueue(0, scn->reg_write_wq);
1807*5113495bSYour Name 	qdf_mem_free(scn->reg_write_queue);
1808*5113495bSYour Name }
1809*5113495bSYour Name 
1810*5113495bSYour Name /**
1811*5113495bSYour Name  * hif_delayed_reg_write_init() - Initialization function for delayed reg writes
1812*5113495bSYour Name  * @scn: hif_softc pointer
1813*5113495bSYour Name  *
1814*5113495bSYour Name  * Initialize main data structures to process register writes in a delayed
1815*5113495bSYour Name  * workqueue.
1816*5113495bSYour Name  * Return: QDF_STATUS
1817*5113495bSYour Name  */
hif_delayed_reg_write_init(struct hif_softc * scn)1818*5113495bSYour Name static QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1819*5113495bSYour Name {
1820*5113495bSYour Name 	qdf_atomic_init(&scn->active_work_cnt);
1821*5113495bSYour Name 	scn->reg_write_wq =
1822*5113495bSYour Name 		qdf_alloc_high_prior_ordered_workqueue("hif_register_write_wq");
1823*5113495bSYour Name 	qdf_create_work(0, &scn->reg_write_work, hif_reg_write_work, scn);
1824*5113495bSYour Name 	scn->reg_write_queue = qdf_mem_malloc(HIF_REG_WRITE_QUEUE_LEN *
1825*5113495bSYour Name 					      sizeof(*scn->reg_write_queue));
1826*5113495bSYour Name 	if (!scn->reg_write_queue) {
1827*5113495bSYour Name 		hif_err("unable to allocate memory for delayed reg write");
1828*5113495bSYour Name 		QDF_BUG(0);
1829*5113495bSYour Name 		return QDF_STATUS_E_NOMEM;
1830*5113495bSYour Name 	}
1831*5113495bSYour Name 
1832*5113495bSYour Name 	/* Initial value of indices */
1833*5113495bSYour Name 	scn->read_idx = 0;
1834*5113495bSYour Name 	qdf_atomic_set(&scn->write_idx, -1);
1835*5113495bSYour Name 
1836*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1837*5113495bSYour Name }
1838*5113495bSYour Name 
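/**
 * hif_reg_write_enqueue() - enqueue a CE register write for delayed update
 * @scn: hif_softc pointer
 * @ce_state: copy engine state the write belongs to
 * @value: write index value being deferred
 *
 * If a write for this CE is already queued, only update the coalescing
 * stats. Otherwise claim the next slot in the register write queue, fill
 * in the queue element and schedule the delayed register write work.
 *
 * Return: None
 */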
hif_reg_write_enqueue(struct hif_softc * scn,struct CE_state * ce_state,uint32_t value)1839*5113495bSYour Name static void hif_reg_write_enqueue(struct hif_softc *scn,
1840*5113495bSYour Name 				  struct CE_state *ce_state,
1841*5113495bSYour Name 				  uint32_t value)
1842*5113495bSYour Name {
1843*5113495bSYour Name 	struct hif_reg_write_q_elem *q_elem;
1844*5113495bSYour Name 	uint32_t write_idx;
1845*5113495bSYour Name 
1846*5113495bSYour Name 	if (ce_state->reg_write_in_progress) {
1847*5113495bSYour Name 		hif_debug("Already in progress ce_id %d offset 0x%x value %u",
1848*5113495bSYour Name 			  ce_state->id, ce_state->ce_wrt_idx_offset, value);
1849*5113495bSYour Name 		qdf_atomic_inc(&scn->wstats.coalesces);
1850*5113495bSYour Name 		ce_state->wstats.coalesces++;
1851*5113495bSYour Name 		return;
1852*5113495bSYour Name 	}
1853*5113495bSYour Name 
1854*5113495bSYour Name 	write_idx = qdf_atomic_inc_return(&scn->write_idx);
1855*5113495bSYour Name 	write_idx = write_idx & (HIF_REG_WRITE_QUEUE_LEN - 1);
1856*5113495bSYour Name 
1857*5113495bSYour Name 	q_elem = &scn->reg_write_queue[write_idx];
1858*5113495bSYour Name 	if (q_elem->valid) {
1859*5113495bSYour Name 		hif_err("queue full");
1860*5113495bSYour Name 		QDF_BUG(0);
1861*5113495bSYour Name 		return;
1862*5113495bSYour Name 	}
1863*5113495bSYour Name 
1864*5113495bSYour Name 	qdf_atomic_inc(&scn->wstats.enqueues);
1865*5113495bSYour Name 	ce_state->wstats.enqueues++;
1866*5113495bSYour Name 
1867*5113495bSYour Name 	qdf_atomic_inc(&scn->wstats.q_depth);
1868*5113495bSYour Name 
1869*5113495bSYour Name 	q_elem->ce_state = ce_state;
1870*5113495bSYour Name 	q_elem->offset = ce_state->ce_wrt_idx_offset;
1871*5113495bSYour Name 	q_elem->enqueue_val = value;
1872*5113495bSYour Name 	q_elem->enqueue_time = qdf_get_log_timestamp();
1873*5113495bSYour Name 
1874*5113495bSYour Name 	/*
1875*5113495bSYour Name 	 * Before the valid flag is set to true, all the other
1876*5113495bSYour Name 	 * fields in the q_elem need to be updated in memory.
1877*5113495bSYour Name 	 * Else there is a chance that the dequeuing worker thread
1878*5113495bSYour Name 	 * might read stale entries and process an incorrect srng.
1879*5113495bSYour Name 	 */
1880*5113495bSYour Name 	qdf_wmb();
1881*5113495bSYour Name 	q_elem->valid = true;
1882*5113495bSYour Name 
1883*5113495bSYour Name 	/*
1884*5113495bSYour Name 	 * After all other fields in the q_elem have been updated
1885*5113495bSYour Name 	 * in memory successfully, the valid flag also needs to become
1886*5113495bSYour Name 	 * visible in memory in time.
1887*5113495bSYour Name 	 * Else there is a chance that the dequeuing worker thread
1888*5113495bSYour Name 	 * might read a stale valid flag and the work will be bypassed
1889*5113495bSYour Name 	 * for this round. And if no other work is scheduled later,
1890*5113495bSYour Name 	 * this hal register write would never be issued.
1891*5113495bSYour Name 	 */
1892*5113495bSYour Name 	qdf_wmb();
1893*5113495bSYour Name 
1894*5113495bSYour Name 	ce_state->reg_write_in_progress  = true;
1895*5113495bSYour Name 	qdf_atomic_inc(&scn->active_work_cnt);
1896*5113495bSYour Name 
1897*5113495bSYour Name 	hif_debug("write_idx %u ce_id %d offset 0x%x value %u",
1898*5113495bSYour Name 		  write_idx, ce_state->id, ce_state->ce_wrt_idx_offset, value);
1899*5113495bSYour Name 
1900*5113495bSYour Name 	qdf_queue_work(scn->qdf_dev, scn->reg_write_wq,
1901*5113495bSYour Name 		       &scn->reg_write_work);
1902*5113495bSYour Name }
1903*5113495bSYour Name 
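/**
 * hif_delayed_reg_write() - write a CE write index directly or via the queue
 * @scn: hif_softc pointer
 * @ctrl_addr: CE control address used to derive the CE id
 * @val: write index value
 *
 * Non HTT data copy engines are always queued. For HTT tx/rx copy engines
 * the write is done directly when throughput is high or the MHI link is
 * already in L0, and queued otherwise.
 *
 * Return: None
 */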
hif_delayed_reg_write(struct hif_softc * scn,uint32_t ctrl_addr,uint32_t val)1904*5113495bSYour Name void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
1905*5113495bSYour Name 			   uint32_t val)
1906*5113495bSYour Name {
1907*5113495bSYour Name 	struct CE_state *ce_state;
1908*5113495bSYour Name 	int ce_id = COPY_ENGINE_ID(ctrl_addr);
1909*5113495bSYour Name 
1910*5113495bSYour Name 	ce_state = scn->ce_id_to_state[ce_id];
1911*5113495bSYour Name 
1912*5113495bSYour Name 	if (!ce_state->htt_tx_data && !ce_state->htt_rx_data) {
1913*5113495bSYour Name 		hif_reg_write_enqueue(scn, ce_state, val);
1914*5113495bSYour Name 		return;
1915*5113495bSYour Name 	}
1916*5113495bSYour Name 
1917*5113495bSYour Name 	if (hif_is_reg_write_tput_level_high(scn) ||
1918*5113495bSYour Name 	    (PLD_MHI_STATE_L0 == pld_get_mhi_state(scn->qdf_dev->dev))) {
1919*5113495bSYour Name 		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset, val);
1920*5113495bSYour Name 		qdf_atomic_inc(&scn->wstats.direct);
1921*5113495bSYour Name 		ce_state->wstats.direct++;
1922*5113495bSYour Name 	} else {
1923*5113495bSYour Name 		hif_reg_write_enqueue(scn, ce_state, val);
1924*5113495bSYour Name 	}
1925*5113495bSYour Name }
1926*5113495bSYour Name #else
hif_delayed_reg_write_init(struct hif_softc * scn)1927*5113495bSYour Name static inline QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
1928*5113495bSYour Name {
1929*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1930*5113495bSYour Name }
1931*5113495bSYour Name 
hif_delayed_reg_write_deinit(struct hif_softc * scn)1932*5113495bSYour Name static inline void  hif_delayed_reg_write_deinit(struct hif_softc *scn)
1933*5113495bSYour Name {
1934*5113495bSYour Name }
1935*5113495bSYour Name #endif
1936*5113495bSYour Name 
1937*5113495bSYour Name #if defined(QCA_WIFI_WCN6450)
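/**
 * hif_hal_attach() - attach the HAL SOC handle for this target
 * @scn: hif_softc pointer
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */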
hif_hal_attach(struct hif_softc * scn)1938*5113495bSYour Name static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1939*5113495bSYour Name {
1940*5113495bSYour Name 	scn->hal_soc = hal_attach(hif_softc_to_hif_opaque_softc(scn),
1941*5113495bSYour Name 				  scn->qdf_dev);
1942*5113495bSYour Name 	if (!scn->hal_soc)
1943*5113495bSYour Name 		return QDF_STATUS_E_FAILURE;
1944*5113495bSYour Name 
1945*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1946*5113495bSYour Name }
1947*5113495bSYour Name 
hif_hal_detach(struct hif_softc * scn)1948*5113495bSYour Name static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1949*5113495bSYour Name {
1950*5113495bSYour Name 	hal_detach(scn->hal_soc);
1951*5113495bSYour Name 	scn->hal_soc = NULL;
1952*5113495bSYour Name 
1953*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1954*5113495bSYour Name }
1955*5113495bSYour Name #elif (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
1956*5113495bSYour Name 	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
1957*5113495bSYour Name 	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
1958*5113495bSYour Name 	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
1959*5113495bSYour Name 	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
1960*5113495bSYour Name 	defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
hif_hal_attach(struct hif_softc * scn)1961*5113495bSYour Name static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1962*5113495bSYour Name {
1963*5113495bSYour Name 	if (ce_srng_based(scn)) {
1964*5113495bSYour Name 		scn->hal_soc = hal_attach(
1965*5113495bSYour Name 					hif_softc_to_hif_opaque_softc(scn),
1966*5113495bSYour Name 					scn->qdf_dev);
1967*5113495bSYour Name 		if (!scn->hal_soc)
1968*5113495bSYour Name 			return QDF_STATUS_E_FAILURE;
1969*5113495bSYour Name 	}
1970*5113495bSYour Name 
1971*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1972*5113495bSYour Name }
1973*5113495bSYour Name 
hif_hal_detach(struct hif_softc * scn)1974*5113495bSYour Name static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1975*5113495bSYour Name {
1976*5113495bSYour Name 	if (ce_srng_based(scn)) {
1977*5113495bSYour Name 		hal_detach(scn->hal_soc);
1978*5113495bSYour Name 		scn->hal_soc = NULL;
1979*5113495bSYour Name 	}
1980*5113495bSYour Name 
1981*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1982*5113495bSYour Name }
1983*5113495bSYour Name #else
hif_hal_attach(struct hif_softc * scn)1984*5113495bSYour Name static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
1985*5113495bSYour Name {
1986*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1987*5113495bSYour Name }
1988*5113495bSYour Name 
hif_hal_detach(struct hif_softc * scn)1989*5113495bSYour Name static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
1990*5113495bSYour Name {
1991*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
1992*5113495bSYour Name }
1993*5113495bSYour Name #endif
1994*5113495bSYour Name 
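/**
 * hif_init_dma_mask() - set the DMA coherent mask for the bus
 * @dev: device
 * @bus_type: bus type
 *
 * Only the IPCI bus sets the default coherent DMA mask here; other bus
 * types keep their existing initialization sequence.
 *
 * Return: 0 on success, error code from qdf_set_dma_coherent_mask() on
 *	   failure
 */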
hif_init_dma_mask(struct device * dev,enum qdf_bus_type bus_type)1995*5113495bSYour Name int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
1996*5113495bSYour Name {
1997*5113495bSYour Name 	int ret;
1998*5113495bSYour Name 
1999*5113495bSYour Name 	switch (bus_type) {
2000*5113495bSYour Name 	case QDF_BUS_TYPE_IPCI:
2001*5113495bSYour Name 		ret = qdf_set_dma_coherent_mask(dev,
2002*5113495bSYour Name 						DMA_COHERENT_MASK_DEFAULT);
2003*5113495bSYour Name 		if (ret) {
2004*5113495bSYour Name 			hif_err("Failed to set dma mask error = %d", ret);
2005*5113495bSYour Name 			return ret;
2006*5113495bSYour Name 		}
2007*5113495bSYour Name 
2008*5113495bSYour Name 		break;
2009*5113495bSYour Name 	default:
2010*5113495bSYour Name 		/* Follow the existing sequence for other targets */
2011*5113495bSYour Name 		break;
2012*5113495bSYour Name 	}
2013*5113495bSYour Name 
2014*5113495bSYour Name 	return 0;
2015*5113495bSYour Name }
2016*5113495bSYour Name 
2017*5113495bSYour Name /**
2018*5113495bSYour Name  * hif_enable(): hif_enable
2019*5113495bSYour Name  * @hif_ctx: hif_ctx
2020*5113495bSYour Name  * @dev: dev
2021*5113495bSYour Name  * @bdev: bus dev
2022*5113495bSYour Name  * @bid: bus ID
2023*5113495bSYour Name  * @bus_type: bus type
2024*5113495bSYour Name  * @type: enable type
2025*5113495bSYour Name  *
2026*5113495bSYour Name  * Return: QDF_STATUS
2027*5113495bSYour Name  */
hif_enable(struct hif_opaque_softc * hif_ctx,struct device * dev,void * bdev,const struct hif_bus_id * bid,enum qdf_bus_type bus_type,enum hif_enable_type type)2028*5113495bSYour Name QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
2029*5113495bSYour Name 					  void *bdev,
2030*5113495bSYour Name 					  const struct hif_bus_id *bid,
2031*5113495bSYour Name 					  enum qdf_bus_type bus_type,
2032*5113495bSYour Name 					  enum hif_enable_type type)
2033*5113495bSYour Name {
2034*5113495bSYour Name 	QDF_STATUS status;
2035*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2036*5113495bSYour Name 
2037*5113495bSYour Name 	if (!scn) {
2038*5113495bSYour Name 		hif_err("hif_ctx = NULL");
2039*5113495bSYour Name 		return QDF_STATUS_E_NULL_VALUE;
2040*5113495bSYour Name 	}
2041*5113495bSYour Name 
2042*5113495bSYour Name 	status = hif_enable_bus(scn, dev, bdev, bid, type);
2043*5113495bSYour Name 	if (status != QDF_STATUS_SUCCESS) {
2044*5113495bSYour Name 		hif_err("hif_enable_bus error = %d", status);
2045*5113495bSYour Name 		return status;
2046*5113495bSYour Name 	}
2047*5113495bSYour Name 
2048*5113495bSYour Name 	status = hif_hal_attach(scn);
2049*5113495bSYour Name 	if (status != QDF_STATUS_SUCCESS) {
2050*5113495bSYour Name 		hif_err("hal attach failed");
2051*5113495bSYour Name 		goto disable_bus;
2052*5113495bSYour Name 	}
2053*5113495bSYour Name 
2054*5113495bSYour Name 	if (hif_delayed_reg_write_init(scn) != QDF_STATUS_SUCCESS) {
2055*5113495bSYour Name 		hif_err("unable to initialize delayed reg write");
2056*5113495bSYour Name 		goto hal_detach;
2057*5113495bSYour Name 	}
2058*5113495bSYour Name 
2059*5113495bSYour Name 	if (hif_bus_configure(scn)) {
2060*5113495bSYour Name 		hif_err("Target probe failed");
2061*5113495bSYour Name 		status = QDF_STATUS_E_FAILURE;
2062*5113495bSYour Name 		goto hal_detach;
2063*5113495bSYour Name 	}
2064*5113495bSYour Name 
2065*5113495bSYour Name 	hif_ut_suspend_init(scn);
2066*5113495bSYour Name 	hif_register_recovery_notifier(scn);
2067*5113495bSYour Name 	hif_latency_detect_timer_start(hif_ctx);
2068*5113495bSYour Name 
2069*5113495bSYour Name 	/*
2070*5113495bSYour Name 	 * Flag to avoid a potential access to unallocated memory from
2071*5113495bSYour Name 	 * the MSI interrupt handler, which could get scheduled as soon
2072*5113495bSYour Name 	 * as MSI is enabled, i.e. to take care of the race caused by
2073*5113495bSYour Name 	 * MSI being enabled before the memory that the interrupt
2074*5113495bSYour Name 	 * handlers use is allocated.
2075*5113495bSYour Name 	 */
2076*5113495bSYour Name 
2077*5113495bSYour Name 	scn->hif_init_done = true;
2078*5113495bSYour Name 
2079*5113495bSYour Name 	hif_debug("OK");
2080*5113495bSYour Name 
2081*5113495bSYour Name 	return QDF_STATUS_SUCCESS;
2082*5113495bSYour Name 
2083*5113495bSYour Name hal_detach:
2084*5113495bSYour Name 	hif_hal_detach(scn);
2085*5113495bSYour Name disable_bus:
2086*5113495bSYour Name 	hif_disable_bus(scn);
2087*5113495bSYour Name 	return status;
2088*5113495bSYour Name }
2089*5113495bSYour Name 
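/**
 * hif_disable() - undo what hif_enable() set up
 * @hif_ctx: hif_ctx
 * @type: disable type
 *
 * Stop latency detection, tear down the delayed register write machinery,
 * shut down or stop the target depending on init state, detach HAL and
 * disable the bus.
 *
 * Return: n/a
 */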
hif_disable(struct hif_opaque_softc * hif_ctx,enum hif_disable_type type)2090*5113495bSYour Name void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
2091*5113495bSYour Name {
2092*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2093*5113495bSYour Name 
2094*5113495bSYour Name 	if (!scn)
2095*5113495bSYour Name 		return;
2096*5113495bSYour Name 
2097*5113495bSYour Name 	hif_delayed_reg_write_deinit(scn);
2098*5113495bSYour Name 	hif_set_enable_detection(hif_ctx, false);
2099*5113495bSYour Name 	hif_latency_detect_timer_stop(hif_ctx);
2100*5113495bSYour Name 
2101*5113495bSYour Name 	hif_unregister_recovery_notifier(scn);
2102*5113495bSYour Name 
2103*5113495bSYour Name 	hif_nointrs(scn);
2104*5113495bSYour Name 	if (scn->hif_init_done == false)
2105*5113495bSYour Name 		hif_shutdown_device(hif_ctx);
2106*5113495bSYour Name 	else
2107*5113495bSYour Name 		hif_stop(hif_ctx);
2108*5113495bSYour Name 
2109*5113495bSYour Name 	hif_hal_detach(scn);
2110*5113495bSYour Name 
2111*5113495bSYour Name 	hif_disable_bus(scn);
2112*5113495bSYour Name 
2113*5113495bSYour Name 	hif_wlan_disable(scn);
2114*5113495bSYour Name 
2115*5113495bSYour Name 	scn->notice_send = false;
2116*5113495bSYour Name 
2117*5113495bSYour Name 	hif_debug("X");
2118*5113495bSYour Name }
2119*5113495bSYour Name 
2120*5113495bSYour Name #ifdef CE_TASKLET_DEBUG_ENABLE
hif_enable_ce_latency_stats(struct hif_opaque_softc * hif_ctx,uint8_t val)2121*5113495bSYour Name void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
2122*5113495bSYour Name {
2123*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2124*5113495bSYour Name 
2125*5113495bSYour Name 	if (!scn)
2126*5113495bSYour Name 		return;
2127*5113495bSYour Name 
2128*5113495bSYour Name 	scn->ce_latency_stats = val;
2129*5113495bSYour Name }
2130*5113495bSYour Name #endif
2131*5113495bSYour Name 
hif_display_stats(struct hif_opaque_softc * hif_ctx)2132*5113495bSYour Name void hif_display_stats(struct hif_opaque_softc *hif_ctx)
2133*5113495bSYour Name {
2134*5113495bSYour Name 	hif_display_bus_stats(hif_ctx);
2135*5113495bSYour Name }
2136*5113495bSYour Name 
2137*5113495bSYour Name qdf_export_symbol(hif_display_stats);
2138*5113495bSYour Name 
hif_clear_stats(struct hif_opaque_softc * hif_ctx)2139*5113495bSYour Name void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
2140*5113495bSYour Name {
2141*5113495bSYour Name 	hif_clear_bus_stats(hif_ctx);
2142*5113495bSYour Name }
2143*5113495bSYour Name 
2144*5113495bSYour Name /**
2145*5113495bSYour Name  * hif_crash_shutdown_dump_bus_register() - dump bus registers
2146*5113495bSYour Name  * @hif_ctx: hif_ctx
2147*5113495bSYour Name  *
2148*5113495bSYour Name  * Return: n/a
2149*5113495bSYour Name  */
2150*5113495bSYour Name #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
2151*5113495bSYour Name 
hif_crash_shutdown_dump_bus_register(void * hif_ctx)2152*5113495bSYour Name static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
2153*5113495bSYour Name {
2154*5113495bSYour Name 	struct hif_opaque_softc *scn = hif_ctx;
2155*5113495bSYour Name 
2156*5113495bSYour Name 	if (hif_check_soc_status(scn))
2157*5113495bSYour Name 		return;
2158*5113495bSYour Name 
2159*5113495bSYour Name 	if (hif_dump_registers(scn))
2160*5113495bSYour Name 		hif_err("Failed to dump bus registers!");
2161*5113495bSYour Name }
2162*5113495bSYour Name 
2163*5113495bSYour Name /**
2164*5113495bSYour Name  * hif_crash_shutdown(): hif_crash_shutdown
2165*5113495bSYour Name  *
2166*5113495bSYour Name  * This function is called by the platform driver to dump CE registers
2167*5113495bSYour Name  *
2168*5113495bSYour Name  * @hif_ctx: hif_ctx
2169*5113495bSYour Name  *
2170*5113495bSYour Name  * Return: n/a
2171*5113495bSYour Name  */
hif_crash_shutdown(struct hif_opaque_softc * hif_ctx)2172*5113495bSYour Name void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2173*5113495bSYour Name {
2174*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2175*5113495bSYour Name 
2176*5113495bSYour Name 	if (!hif_ctx)
2177*5113495bSYour Name 		return;
2178*5113495bSYour Name 
2179*5113495bSYour Name 	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
2180*5113495bSYour Name 		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
2181*5113495bSYour Name 		return;
2182*5113495bSYour Name 	}
2183*5113495bSYour Name 
2184*5113495bSYour Name 	if (TARGET_STATUS_RESET == scn->target_status) {
2185*5113495bSYour Name 		hif_warn("Target is already asserted, ignore!");
2186*5113495bSYour Name 		return;
2187*5113495bSYour Name 	}
2188*5113495bSYour Name 
2189*5113495bSYour Name 	if (hif_is_load_or_unload_in_progress(scn)) {
2190*5113495bSYour Name 		hif_err("Load/unload is in progress, ignore!");
2191*5113495bSYour Name 		return;
2192*5113495bSYour Name 	}
2193*5113495bSYour Name 
2194*5113495bSYour Name 	hif_crash_shutdown_dump_bus_register(hif_ctx);
2195*5113495bSYour Name 	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
2196*5113495bSYour Name 
2197*5113495bSYour Name 	if (ol_copy_ramdump(hif_ctx))
2198*5113495bSYour Name 		goto out;
2199*5113495bSYour Name 
2200*5113495bSYour Name 	hif_info("RAM dump collection completed!");
2201*5113495bSYour Name 
2202*5113495bSYour Name out:
2203*5113495bSYour Name 	return;
2204*5113495bSYour Name }
2205*5113495bSYour Name #else
hif_crash_shutdown(struct hif_opaque_softc * hif_ctx)2206*5113495bSYour Name void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
2207*5113495bSYour Name {
2208*5113495bSYour Name 	hif_debug("Collecting target RAM dump disabled");
2209*5113495bSYour Name }
2210*5113495bSYour Name #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
2211*5113495bSYour Name 
2212*5113495bSYour Name #ifdef QCA_WIFI_3_0
2213*5113495bSYour Name /**
2214*5113495bSYour Name  * hif_check_fw_reg(): hif_check_fw_reg
2215*5113495bSYour Name  * @scn: scn
2216*5113495bSYour Name  *
2217*5113495bSYour Name  * Return: int
2218*5113495bSYour Name  */
hif_check_fw_reg(struct hif_opaque_softc * scn)2219*5113495bSYour Name int hif_check_fw_reg(struct hif_opaque_softc *scn)
2220*5113495bSYour Name {
2221*5113495bSYour Name 	return 0;
2222*5113495bSYour Name }
2223*5113495bSYour Name #endif
2224*5113495bSYour Name 
2225*5113495bSYour Name /**
2226*5113495bSYour Name  * hif_read_phy_mem_base() - read the target's physical memory base address
2227*5113495bSYour Name  * @scn: scn
2228*5113495bSYour Name  * @phy_mem_base: physical mem base
2229*5113495bSYour Name  *
2230*5113495bSYour Name  * Return: n/a
2231*5113495bSYour Name  */
2232*5113495bSYour Name void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
2233*5113495bSYour Name {
2234*5113495bSYour Name 	*phy_mem_base = scn->mem_pa;
2235*5113495bSYour Name }
2236*5113495bSYour Name qdf_export_symbol(hif_read_phy_mem_base);
2237*5113495bSYour Name 
2238*5113495bSYour Name /**
2239*5113495bSYour Name  * hif_get_device_type() - map device and revision IDs to HIF/target types
2240*5113495bSYour Name  * @device_id: device_id
2241*5113495bSYour Name  * @revision_id: revision_id
2242*5113495bSYour Name  * @hif_type: returned hif_type
2243*5113495bSYour Name  * @target_type: returned target_type
2244*5113495bSYour Name  *
2245*5113495bSYour Name  * Return: 0 on success, -ENODEV for unsupported device IDs
2246*5113495bSYour Name  */
2247*5113495bSYour Name int hif_get_device_type(uint32_t device_id,
2248*5113495bSYour Name 			uint32_t revision_id,
2249*5113495bSYour Name 			uint32_t *hif_type, uint32_t *target_type)
2250*5113495bSYour Name {
2251*5113495bSYour Name 	int ret = 0;
2252*5113495bSYour Name 
2253*5113495bSYour Name 	switch (device_id) {
2254*5113495bSYour Name 	case ADRASTEA_DEVICE_ID_P2_E12:
2255*5113495bSYour Name 
2256*5113495bSYour Name 		*hif_type = HIF_TYPE_ADRASTEA;
2257*5113495bSYour Name 		*target_type = TARGET_TYPE_ADRASTEA;
2258*5113495bSYour Name 		break;
2259*5113495bSYour Name 
2260*5113495bSYour Name 	case AR9888_DEVICE_ID:
2261*5113495bSYour Name 		*hif_type = HIF_TYPE_AR9888;
2262*5113495bSYour Name 		*target_type = TARGET_TYPE_AR9888;
2263*5113495bSYour Name 		break;
2264*5113495bSYour Name 
2265*5113495bSYour Name 	case AR6320_DEVICE_ID:
2266*5113495bSYour Name 		switch (revision_id) {
2267*5113495bSYour Name 		case AR6320_FW_1_1:
2268*5113495bSYour Name 		case AR6320_FW_1_3:
2269*5113495bSYour Name 			*hif_type = HIF_TYPE_AR6320;
2270*5113495bSYour Name 			*target_type = TARGET_TYPE_AR6320;
2271*5113495bSYour Name 			break;
2272*5113495bSYour Name 
2273*5113495bSYour Name 		case AR6320_FW_2_0:
2274*5113495bSYour Name 		case AR6320_FW_3_0:
2275*5113495bSYour Name 		case AR6320_FW_3_2:
2276*5113495bSYour Name 			*hif_type = HIF_TYPE_AR6320V2;
2277*5113495bSYour Name 			*target_type = TARGET_TYPE_AR6320V2;
2278*5113495bSYour Name 			break;
2279*5113495bSYour Name 
2280*5113495bSYour Name 		default:
2281*5113495bSYour Name 			hif_err("dev_id = 0x%x, rev_id = 0x%x",
2282*5113495bSYour Name 				device_id, revision_id);
2283*5113495bSYour Name 			ret = -ENODEV;
2284*5113495bSYour Name 			goto end;
2285*5113495bSYour Name 		}
2286*5113495bSYour Name 		break;
2287*5113495bSYour Name 
2288*5113495bSYour Name 	case AR9887_DEVICE_ID:
2289*5113495bSYour Name 		*hif_type = HIF_TYPE_AR9888;
2290*5113495bSYour Name 		*target_type = TARGET_TYPE_AR9888;
2291*5113495bSYour Name 		hif_info(" *********** AR9887 **************");
2292*5113495bSYour Name 		break;
2293*5113495bSYour Name 
2294*5113495bSYour Name 	case QCA9984_DEVICE_ID:
2295*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA9984;
2296*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA9984;
2297*5113495bSYour Name 		hif_info(" *********** QCA9984 *************");
2298*5113495bSYour Name 		break;
2299*5113495bSYour Name 
2300*5113495bSYour Name 	case QCA9888_DEVICE_ID:
2301*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA9888;
2302*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA9888;
2303*5113495bSYour Name 		hif_info(" *********** QCA9888 *************");
2304*5113495bSYour Name 		break;
2305*5113495bSYour Name 
2306*5113495bSYour Name 	case AR900B_DEVICE_ID:
2307*5113495bSYour Name 		*hif_type = HIF_TYPE_AR900B;
2308*5113495bSYour Name 		*target_type = TARGET_TYPE_AR900B;
2309*5113495bSYour Name 		hif_info(" *********** AR900B *************");
2310*5113495bSYour Name 		break;
2311*5113495bSYour Name 
2312*5113495bSYour Name 	case QCA8074_DEVICE_ID:
2313*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA8074;
2314*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA8074;
2315*5113495bSYour Name 		hif_info(" *********** QCA8074  *************");
2316*5113495bSYour Name 		break;
2317*5113495bSYour Name 
2318*5113495bSYour Name 	case QCA6290_EMULATION_DEVICE_ID:
2319*5113495bSYour Name 	case QCA6290_DEVICE_ID:
2320*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA6290;
2321*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA6290;
2322*5113495bSYour Name 		hif_info(" *********** QCA6290 *************");
2323*5113495bSYour Name 		break;
2324*5113495bSYour Name 
2325*5113495bSYour Name 	case QCN9000_DEVICE_ID:
2326*5113495bSYour Name 		*hif_type = HIF_TYPE_QCN9000;
2327*5113495bSYour Name 		*target_type = TARGET_TYPE_QCN9000;
2328*5113495bSYour Name 		hif_info(" *********** QCN9000 *************");
2329*5113495bSYour Name 		break;
2330*5113495bSYour Name 
2331*5113495bSYour Name 	case QCN9224_DEVICE_ID:
2332*5113495bSYour Name 		*hif_type = HIF_TYPE_QCN9224;
2333*5113495bSYour Name 		*target_type = TARGET_TYPE_QCN9224;
2334*5113495bSYour Name 		hif_info(" *********** QCN9224 *************");
2335*5113495bSYour Name 		break;
2336*5113495bSYour Name 
2337*5113495bSYour Name 	case QCN6122_DEVICE_ID:
2338*5113495bSYour Name 		*hif_type = HIF_TYPE_QCN6122;
2339*5113495bSYour Name 		*target_type = TARGET_TYPE_QCN6122;
2340*5113495bSYour Name 		hif_info(" *********** QCN6122 *************");
2341*5113495bSYour Name 		break;
2342*5113495bSYour Name 
2343*5113495bSYour Name 	case QCN9160_DEVICE_ID:
2344*5113495bSYour Name 		*hif_type = HIF_TYPE_QCN9160;
2345*5113495bSYour Name 		*target_type = TARGET_TYPE_QCN9160;
2346*5113495bSYour Name 		hif_info(" *********** QCN9160 *************");
2347*5113495bSYour Name 		break;
2348*5113495bSYour Name 
2349*5113495bSYour Name 	case QCN6432_DEVICE_ID:
2350*5113495bSYour Name 		*hif_type = HIF_TYPE_QCN6432;
2351*5113495bSYour Name 		*target_type = TARGET_TYPE_QCN6432;
2352*5113495bSYour Name 		hif_info(" *********** QCN6432 *************");
2353*5113495bSYour Name 		break;
2354*5113495bSYour Name 
2355*5113495bSYour Name 	case QCN7605_DEVICE_ID:
2356*5113495bSYour Name 	case QCN7605_COMPOSITE:
2357*5113495bSYour Name 	case QCN7605_STANDALONE:
2358*5113495bSYour Name 	case QCN7605_STANDALONE_V2:
2359*5113495bSYour Name 	case QCN7605_COMPOSITE_V2:
2360*5113495bSYour Name 		*hif_type = HIF_TYPE_QCN7605;
2361*5113495bSYour Name 		*target_type = TARGET_TYPE_QCN7605;
2362*5113495bSYour Name 		hif_info(" *********** QCN7605 *************");
2363*5113495bSYour Name 		break;
2364*5113495bSYour Name 
2365*5113495bSYour Name 	case QCA6390_DEVICE_ID:
2366*5113495bSYour Name 	case QCA6390_EMULATION_DEVICE_ID:
2367*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA6390;
2368*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA6390;
2369*5113495bSYour Name 		hif_info(" *********** QCA6390 *************");
2370*5113495bSYour Name 		break;
2371*5113495bSYour Name 
2372*5113495bSYour Name 	case QCA6490_DEVICE_ID:
2373*5113495bSYour Name 	case QCA6490_EMULATION_DEVICE_ID:
2374*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA6490;
2375*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA6490;
2376*5113495bSYour Name 		hif_info(" *********** QCA6490 *************");
2377*5113495bSYour Name 		break;
2378*5113495bSYour Name 
2379*5113495bSYour Name 	case QCA6750_DEVICE_ID:
2380*5113495bSYour Name 	case QCA6750_EMULATION_DEVICE_ID:
2381*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA6750;
2382*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA6750;
2383*5113495bSYour Name 		hif_info(" *********** QCA6750 *************");
2384*5113495bSYour Name 		break;
2385*5113495bSYour Name 
2386*5113495bSYour Name 	case KIWI_DEVICE_ID:
2387*5113495bSYour Name 		*hif_type = HIF_TYPE_KIWI;
2388*5113495bSYour Name 		*target_type = TARGET_TYPE_KIWI;
2389*5113495bSYour Name 		hif_info(" *********** KIWI *************");
2390*5113495bSYour Name 		break;
2391*5113495bSYour Name 
2392*5113495bSYour Name 	case MANGO_DEVICE_ID:
2393*5113495bSYour Name 		*hif_type = HIF_TYPE_MANGO;
2394*5113495bSYour Name 		*target_type = TARGET_TYPE_MANGO;
2395*5113495bSYour Name 		hif_info(" *********** MANGO *************");
2396*5113495bSYour Name 		break;
2397*5113495bSYour Name 
2398*5113495bSYour Name 	case PEACH_DEVICE_ID:
2399*5113495bSYour Name 		*hif_type = HIF_TYPE_PEACH;
2400*5113495bSYour Name 		*target_type = TARGET_TYPE_PEACH;
2401*5113495bSYour Name 		hif_info(" *********** PEACH *************");
2402*5113495bSYour Name 		break;
2403*5113495bSYour Name 
2404*5113495bSYour Name 	case QCA8074V2_DEVICE_ID:
2405*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA8074V2;
2406*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA8074V2;
2407*5113495bSYour Name 		hif_info(" *********** QCA8074V2 *************");
2408*5113495bSYour Name 		break;
2409*5113495bSYour Name 
2410*5113495bSYour Name 	case QCA6018_DEVICE_ID:
2411*5113495bSYour Name 	case RUMIM2M_DEVICE_ID_NODE0:
2412*5113495bSYour Name 	case RUMIM2M_DEVICE_ID_NODE1:
2413*5113495bSYour Name 	case RUMIM2M_DEVICE_ID_NODE2:
2414*5113495bSYour Name 	case RUMIM2M_DEVICE_ID_NODE3:
2415*5113495bSYour Name 	case RUMIM2M_DEVICE_ID_NODE4:
2416*5113495bSYour Name 	case RUMIM2M_DEVICE_ID_NODE5:
2417*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA6018;
2418*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA6018;
2419*5113495bSYour Name 		hif_info(" *********** QCA6018 *************");
2420*5113495bSYour Name 		break;
2421*5113495bSYour Name 
2422*5113495bSYour Name 	case QCA5018_DEVICE_ID:
2423*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA5018;
2424*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA5018;
2425*5113495bSYour Name 		hif_info(" *********** QCA5018 *************");
2426*5113495bSYour Name 		break;
2427*5113495bSYour Name 
2428*5113495bSYour Name 	case QCA5332_DEVICE_ID:
2429*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA5332;
2430*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA5332;
2431*5113495bSYour Name 		hif_info(" *********** QCA5332 *************");
2432*5113495bSYour Name 		break;
2433*5113495bSYour Name 
2434*5113495bSYour Name 	case QCA9574_DEVICE_ID:
2435*5113495bSYour Name 		*hif_type = HIF_TYPE_QCA9574;
2436*5113495bSYour Name 		*target_type = TARGET_TYPE_QCA9574;
2437*5113495bSYour Name 		hif_info(" *********** QCA9574 *************");
2438*5113495bSYour Name 		break;
2439*5113495bSYour Name 
2440*5113495bSYour Name 	case WCN6450_DEVICE_ID:
2441*5113495bSYour Name 		*hif_type = HIF_TYPE_WCN6450;
2442*5113495bSYour Name 		*target_type = TARGET_TYPE_WCN6450;
2443*5113495bSYour Name 		hif_info(" *********** WCN6450 *************");
2444*5113495bSYour Name 		break;
2445*5113495bSYour Name 
2446*5113495bSYour Name 	default:
2447*5113495bSYour Name 		hif_err("Unsupported device ID = 0x%x!", device_id);
2448*5113495bSYour Name 		ret = -ENODEV;
2449*5113495bSYour Name 		break;
2450*5113495bSYour Name 	}
2451*5113495bSYour Name 
2452*5113495bSYour Name 	if (*target_type == TARGET_TYPE_UNKNOWN) {
2453*5113495bSYour Name 		hif_err("Unsupported target_type!");
2454*5113495bSYour Name 		ret = -ENODEV;
2455*5113495bSYour Name 	}
2456*5113495bSYour Name end:
2457*5113495bSYour Name 	return ret;
2458*5113495bSYour Name }
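/*
 * Usage sketch (editorial illustration, not part of the upstream file):
 * a bus probe path can resolve the HIF and target types from the IDs it
 * read from config space. 'pci_dev_id' and 'pci_rev_id' are placeholder
 * names for those values.
 *
 *	uint32_t hif_type = 0, target_type = TARGET_TYPE_UNKNOWN;
 *
 *	if (hif_get_device_type(pci_dev_id, pci_rev_id,
 *				&hif_type, &target_type))
 *		return -ENODEV;
 */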
2459*5113495bSYour Name 
2460*5113495bSYour Name /**
2461*5113495bSYour Name  * hif_get_bus_type() - return the bus type
2462*5113495bSYour Name  * @hif_hdl: HIF Context
2463*5113495bSYour Name  *
2464*5113495bSYour Name  * Return: enum qdf_bus_type
2465*5113495bSYour Name  */
2466*5113495bSYour Name enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
2467*5113495bSYour Name {
2468*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
2469*5113495bSYour Name 
2470*5113495bSYour Name 	return scn->bus_type;
2471*5113495bSYour Name }
2472*5113495bSYour Name 
2473*5113495bSYour Name /*
2474*5113495bSYour Name  * Target info and ini parameters are global to the driver.
2475*5113495bSYour Name  * Hence these structures are exposed to all the modules in
2476*5113495bSYour Name  * the driver, and they don't need to maintain multiple copies
2477*5113495bSYour Name  * of the same info; instead, get the handle from hif and
2478*5113495bSYour Name  * modify them in hif.
2479*5113495bSYour Name  */
2480*5113495bSYour Name 
2481*5113495bSYour Name /**
2482*5113495bSYour Name  * hif_get_ini_handle() - API to get hif_config_param handle
2483*5113495bSYour Name  * @hif_ctx: HIF Context
2484*5113495bSYour Name  *
2485*5113495bSYour Name  * Return: pointer to hif_config_info
2486*5113495bSYour Name  */
2487*5113495bSYour Name struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
2488*5113495bSYour Name {
2489*5113495bSYour Name 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2490*5113495bSYour Name 
2491*5113495bSYour Name 	return &sc->hif_config;
2492*5113495bSYour Name }
2493*5113495bSYour Name 
2494*5113495bSYour Name /**
2495*5113495bSYour Name  * hif_get_target_info_handle() - API to get hif_target_info handle
2496*5113495bSYour Name  * @hif_ctx: HIF context
2497*5113495bSYour Name  *
2498*5113495bSYour Name  * Return: Pointer to hif_target_info
2499*5113495bSYour Name  */
2500*5113495bSYour Name struct hif_target_info *hif_get_target_info_handle(
2501*5113495bSYour Name 					struct hif_opaque_softc *hif_ctx)
2502*5113495bSYour Name {
2503*5113495bSYour Name 	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
2504*5113495bSYour Name 
2505*5113495bSYour Name 	return &sc->target_info;
2506*5113495bSYour Name 
2507*5113495bSYour Name }
2508*5113495bSYour Name qdf_export_symbol(hif_get_target_info_handle);
2509*5113495bSYour Name 
2510*5113495bSYour Name #ifdef RECEIVE_OFFLOAD
2511*5113495bSYour Name void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
2512*5113495bSYour Name 				 void (offld_flush_handler)(void *))
2513*5113495bSYour Name {
2514*5113495bSYour Name 	if (hif_napi_enabled(scn, -1))
2515*5113495bSYour Name 		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
2516*5113495bSYour Name 	else
2517*5113495bSYour Name 		hif_err("NAPI not enabled");
2518*5113495bSYour Name }
2519*5113495bSYour Name qdf_export_symbol(hif_offld_flush_cb_register);
2520*5113495bSYour Name 
2521*5113495bSYour Name void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
2522*5113495bSYour Name {
2523*5113495bSYour Name 	if (hif_napi_enabled(scn, -1))
2524*5113495bSYour Name 		hif_napi_rx_offld_flush_cb_deregister(scn);
2525*5113495bSYour Name 	else
2526*5113495bSYour Name 		hif_err("NAPI not enabled");
2527*5113495bSYour Name }
2528*5113495bSYour Name qdf_export_symbol(hif_offld_flush_cb_deregister);
2529*5113495bSYour Name 
2530*5113495bSYour Name int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2531*5113495bSYour Name {
2532*5113495bSYour Name 	if (hif_napi_enabled(hif_hdl, -1))
2533*5113495bSYour Name 		return NAPI_PIPE2ID(ctx_id);
2534*5113495bSYour Name 	else
2535*5113495bSYour Name 		return ctx_id;
2536*5113495bSYour Name }
2537*5113495bSYour Name #else /* RECEIVE_OFFLOAD */
2538*5113495bSYour Name int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
2539*5113495bSYour Name {
2540*5113495bSYour Name 	return 0;
2541*5113495bSYour Name }
2542*5113495bSYour Name qdf_export_symbol(hif_get_rx_ctx_id);
2543*5113495bSYour Name #endif /* RECEIVE_OFFLOAD */
2544*5113495bSYour Name 
2545*5113495bSYour Name #if defined(FEATURE_LRO)
2546*5113495bSYour Name 
2547*5113495bSYour Name /**
2548*5113495bSYour Name  * hif_get_lro_info() - return the LRO instance for an instance ID
2549*5113495bSYour Name  * @ctx_id: LRO instance ID
2550*5113495bSYour Name  * @hif_hdl: HIF Context
2551*5113495bSYour Name  *
2552*5113495bSYour Name  * Return: Pointer to LRO instance.
2553*5113495bSYour Name  */
2554*5113495bSYour Name void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
2555*5113495bSYour Name {
2556*5113495bSYour Name 	void *data;
2557*5113495bSYour Name 
2558*5113495bSYour Name 	if (hif_napi_enabled(hif_hdl, -1))
2559*5113495bSYour Name 		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
2560*5113495bSYour Name 	else
2561*5113495bSYour Name 		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
2562*5113495bSYour Name 
2563*5113495bSYour Name 	return data;
2564*5113495bSYour Name }
2565*5113495bSYour Name #endif
2566*5113495bSYour Name 
2567*5113495bSYour Name /**
2568*5113495bSYour Name  * hif_get_target_status() - API to get target status
2569*5113495bSYour Name  * @hif_ctx: HIF Context
2570*5113495bSYour Name  *
2571*5113495bSYour Name  * Return: enum hif_target_status
2572*5113495bSYour Name  */
2573*5113495bSYour Name enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
2574*5113495bSYour Name {
2575*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2576*5113495bSYour Name 
2577*5113495bSYour Name 	return scn->target_status;
2578*5113495bSYour Name }
2579*5113495bSYour Name qdf_export_symbol(hif_get_target_status);
2580*5113495bSYour Name 
2581*5113495bSYour Name /**
2582*5113495bSYour Name  * hif_set_target_status() - API to set target status
2583*5113495bSYour Name  * @hif_ctx: HIF Context
2584*5113495bSYour Name  * @status: Target Status
2585*5113495bSYour Name  *
2586*5113495bSYour Name  * Return: void
2587*5113495bSYour Name  */
2588*5113495bSYour Name void hif_set_target_status(struct hif_opaque_softc *hif_ctx,
2589*5113495bSYour Name 			   enum hif_target_status status)
2590*5113495bSYour Name {
2591*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2592*5113495bSYour Name 
2593*5113495bSYour Name 	scn->target_status = status;
2594*5113495bSYour Name }
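/*
 * Typical get/set pattern (sketch, mirroring hif_crash_shutdown() above):
 * callers that are about to assert the target or collect a dump read the
 * status first and bail out if it is already asserted, then mark it reset.
 *
 *	if (hif_get_target_status(hif_ctx) == TARGET_STATUS_RESET)
 *		return;
 *	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
 */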
2595*5113495bSYour Name 
2596*5113495bSYour Name /**
2597*5113495bSYour Name  * hif_init_ini_config() - API to initialize HIF configuration parameters
2598*5113495bSYour Name  * @hif_ctx: HIF Context
2599*5113495bSYour Name  * @cfg: HIF Configuration
2600*5113495bSYour Name  *
2601*5113495bSYour Name  * Return: void
2602*5113495bSYour Name  */
2603*5113495bSYour Name void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
2604*5113495bSYour Name 			 struct hif_config_info *cfg)
2605*5113495bSYour Name {
2606*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2607*5113495bSYour Name 
2608*5113495bSYour Name 	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
2609*5113495bSYour Name }
2610*5113495bSYour Name 
2611*5113495bSYour Name /**
2612*5113495bSYour Name  * hif_get_conparam() - API to get driver mode in HIF
2613*5113495bSYour Name  * @scn: HIF Context
2614*5113495bSYour Name  *
2615*5113495bSYour Name  * Return: driver mode of operation
2616*5113495bSYour Name  */
2617*5113495bSYour Name uint32_t hif_get_conparam(struct hif_softc *scn)
2618*5113495bSYour Name {
2619*5113495bSYour Name 	if (!scn)
2620*5113495bSYour Name 		return 0;
2621*5113495bSYour Name 
2622*5113495bSYour Name 	return scn->hif_con_param;
2623*5113495bSYour Name }
2624*5113495bSYour Name 
2625*5113495bSYour Name /**
2626*5113495bSYour Name  * hif_get_callbacks_handle() - API to get callbacks Handle
2627*5113495bSYour Name  * @scn: HIF Context
2628*5113495bSYour Name  *
2629*5113495bSYour Name  * Return: pointer to HIF Callbacks
2630*5113495bSYour Name  */
2631*5113495bSYour Name struct hif_driver_state_callbacks *hif_get_callbacks_handle(
2632*5113495bSYour Name 							struct hif_softc *scn)
2633*5113495bSYour Name {
2634*5113495bSYour Name 	return &scn->callbacks;
2635*5113495bSYour Name }
2636*5113495bSYour Name 
2637*5113495bSYour Name /**
2638*5113495bSYour Name  * hif_is_driver_unloading() - API to query upper layers if driver is unloading
2639*5113495bSYour Name  * @scn: HIF Context
2640*5113495bSYour Name  *
2641*5113495bSYour Name  * Return: True/False
2642*5113495bSYour Name  */
2643*5113495bSYour Name bool hif_is_driver_unloading(struct hif_softc *scn)
2644*5113495bSYour Name {
2645*5113495bSYour Name 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2646*5113495bSYour Name 
2647*5113495bSYour Name 	if (cbk && cbk->is_driver_unloading)
2648*5113495bSYour Name 		return cbk->is_driver_unloading(cbk->context);
2649*5113495bSYour Name 
2650*5113495bSYour Name 	return false;
2651*5113495bSYour Name }
2652*5113495bSYour Name 
2653*5113495bSYour Name /**
2654*5113495bSYour Name  * hif_is_load_or_unload_in_progress() - API to query upper layers if
2655*5113495bSYour Name  * load/unload in progress
2656*5113495bSYour Name  * @scn: HIF Context
2657*5113495bSYour Name  *
2658*5113495bSYour Name  * Return: True/False
2659*5113495bSYour Name  */
2660*5113495bSYour Name bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
2661*5113495bSYour Name {
2662*5113495bSYour Name 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2663*5113495bSYour Name 
2664*5113495bSYour Name 	if (cbk && cbk->is_load_unload_in_progress)
2665*5113495bSYour Name 		return cbk->is_load_unload_in_progress(cbk->context);
2666*5113495bSYour Name 
2667*5113495bSYour Name 	return false;
2668*5113495bSYour Name }
2669*5113495bSYour Name 
2670*5113495bSYour Name /**
2671*5113495bSYour Name  * hif_is_recovery_in_progress() - API to query upper layers if recovery in
2672*5113495bSYour Name  * progress
2673*5113495bSYour Name  * @scn: HIF Context
2674*5113495bSYour Name  *
2675*5113495bSYour Name  * Return: True/False
2676*5113495bSYour Name  */
2677*5113495bSYour Name bool hif_is_recovery_in_progress(struct hif_softc *scn)
2678*5113495bSYour Name {
2679*5113495bSYour Name 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2680*5113495bSYour Name 
2681*5113495bSYour Name 	if (cbk && cbk->is_recovery_in_progress)
2682*5113495bSYour Name 		return cbk->is_recovery_in_progress(cbk->context);
2683*5113495bSYour Name 
2684*5113495bSYour Name 	return false;
2685*5113495bSYour Name }
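/*
 * Guard sketch (illustrative): operations that touch the target are
 * usually skipped while a load/unload or recovery is in flight, as done
 * in hif_crash_shutdown() above.
 *
 *	if (hif_is_load_or_unload_in_progress(scn) ||
 *	    hif_is_recovery_in_progress(scn))
 *		return;
 */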
2686*5113495bSYour Name 
2687*5113495bSYour Name #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
2688*5113495bSYour Name     defined(HIF_IPCI)
2689*5113495bSYour Name 
2690*5113495bSYour Name /**
2691*5113495bSYour Name  * hif_update_pipe_callback() - API to register pipe specific callbacks
2692*5113495bSYour Name  * @osc: Opaque softc
2693*5113495bSYour Name  * @pipeid: pipe id
2694*5113495bSYour Name  * @callbacks: callbacks to register
2695*5113495bSYour Name  *
2696*5113495bSYour Name  * Return: void
2697*5113495bSYour Name  */
2698*5113495bSYour Name 
2699*5113495bSYour Name void hif_update_pipe_callback(struct hif_opaque_softc *osc,
2700*5113495bSYour Name 					u_int8_t pipeid,
2701*5113495bSYour Name 					struct hif_msg_callbacks *callbacks)
2702*5113495bSYour Name {
2703*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(osc);
2704*5113495bSYour Name 	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
2705*5113495bSYour Name 	struct HIF_CE_pipe_info *pipe_info;
2706*5113495bSYour Name 
2707*5113495bSYour Name 	QDF_BUG(pipeid < CE_COUNT_MAX);
2708*5113495bSYour Name 
2709*5113495bSYour Name 	hif_debug("pipeid: %d", pipeid);
2710*5113495bSYour Name 
2711*5113495bSYour Name 	pipe_info = &hif_state->pipe_info[pipeid];
2712*5113495bSYour Name 
2713*5113495bSYour Name 	qdf_mem_copy(&pipe_info->pipe_callbacks,
2714*5113495bSYour Name 			callbacks, sizeof(pipe_info->pipe_callbacks));
2715*5113495bSYour Name }
2716*5113495bSYour Name qdf_export_symbol(hif_update_pipe_callback);
2717*5113495bSYour Name 
2718*5113495bSYour Name /**
2719*5113495bSYour Name  * hif_is_target_ready() - API to query if target is in ready state
2721*5113495bSYour Name  * @scn: HIF Context
2722*5113495bSYour Name  *
2723*5113495bSYour Name  * Return: True/False
2724*5113495bSYour Name  */
2725*5113495bSYour Name bool hif_is_target_ready(struct hif_softc *scn)
2726*5113495bSYour Name {
2727*5113495bSYour Name 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2728*5113495bSYour Name 
2729*5113495bSYour Name 	if (cbk && cbk->is_target_ready)
2730*5113495bSYour Name 		return cbk->is_target_ready(cbk->context);
2731*5113495bSYour Name 	/*
2732*5113495bSYour Name 	 * If the callback is not registered then there is no way to determine
2733*5113495bSYour Name 	 * whether the target is ready. In such a case return true to indicate
2734*5113495bSYour Name 	 * that the target is ready.
2735*5113495bSYour Name 	 */
2736*5113495bSYour Name 	return true;
2737*5113495bSYour Name }
2738*5113495bSYour Name qdf_export_symbol(hif_is_target_ready);
2739*5113495bSYour Name 
2740*5113495bSYour Name int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
2741*5113495bSYour Name {
2742*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
2743*5113495bSYour Name 	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
2744*5113495bSYour Name 
2745*5113495bSYour Name 	if (cbk && cbk->get_bandwidth_level)
2746*5113495bSYour Name 		return cbk->get_bandwidth_level(cbk->context);
2747*5113495bSYour Name 
2748*5113495bSYour Name 	return 0;
2749*5113495bSYour Name }
2750*5113495bSYour Name 
2751*5113495bSYour Name qdf_export_symbol(hif_get_bandwidth_level);
2752*5113495bSYour Name 
2753*5113495bSYour Name #ifdef DP_MEM_PRE_ALLOC
2754*5113495bSYour Name void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
2755*5113495bSYour Name 					 qdf_size_t size,
2756*5113495bSYour Name 					 qdf_dma_addr_t *paddr,
2757*5113495bSYour Name 					 uint32_t ring_type,
2758*5113495bSYour Name 					 uint8_t *is_mem_prealloc)
2759*5113495bSYour Name {
2760*5113495bSYour Name 	void *vaddr = NULL;
2761*5113495bSYour Name 	struct hif_driver_state_callbacks *cbk =
2762*5113495bSYour Name 				hif_get_callbacks_handle(scn);
2763*5113495bSYour Name 
2764*5113495bSYour Name 	*is_mem_prealloc = false;
2765*5113495bSYour Name 	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
2766*5113495bSYour Name 		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
2767*5113495bSYour Name 								   paddr,
2768*5113495bSYour Name 								   ring_type);
2769*5113495bSYour Name 		if (vaddr) {
2770*5113495bSYour Name 			*is_mem_prealloc = true;
2771*5113495bSYour Name 			goto end;
2772*5113495bSYour Name 		}
2773*5113495bSYour Name 	}
2774*5113495bSYour Name 
2775*5113495bSYour Name 	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
2776*5113495bSYour Name 					 scn->qdf_dev->dev,
2777*5113495bSYour Name 					 size,
2778*5113495bSYour Name 					 paddr);
2779*5113495bSYour Name end:
2780*5113495bSYour Name 	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
2781*5113495bSYour Name 		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
2782*5113495bSYour Name 		(void *)*paddr, (int)size, ring_type);
2783*5113495bSYour Name 
2784*5113495bSYour Name 	return vaddr;
2785*5113495bSYour Name }
2786*5113495bSYour Name 
2787*5113495bSYour Name void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
2788*5113495bSYour Name 				       qdf_size_t size,
2789*5113495bSYour Name 				       void *vaddr,
2790*5113495bSYour Name 				       qdf_dma_addr_t paddr,
2791*5113495bSYour Name 				       qdf_dma_context_t memctx,
2792*5113495bSYour Name 				       uint8_t is_mem_prealloc)
2793*5113495bSYour Name {
2794*5113495bSYour Name 	struct hif_driver_state_callbacks *cbk =
2795*5113495bSYour Name 				hif_get_callbacks_handle(scn);
2796*5113495bSYour Name 
2797*5113495bSYour Name 	if (is_mem_prealloc) {
2798*5113495bSYour Name 		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
2799*5113495bSYour Name 			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
2800*5113495bSYour Name 		} else {
2801*5113495bSYour Name 			dp_warn("dp_prealloc_put_consistent_unaligned NULL");
2802*5113495bSYour Name 			QDF_BUG(0);
2803*5113495bSYour Name 		}
2804*5113495bSYour Name 	} else {
2805*5113495bSYour Name 		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
2806*5113495bSYour Name 					size, vaddr, paddr, memctx);
2807*5113495bSYour Name 	}
2808*5113495bSYour Name }
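/*
 * Pairing sketch (illustrative): the is_mem_prealloc flag returned by the
 * allocator must be handed back on free so the buffer either goes back to
 * the pre-allocated pool or is released via qdf_mem_free_consistent().
 * 'size', 'ring_type' and 'memctx' are placeholder values here.
 *
 *	uint8_t is_prealloc;
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = hif_mem_alloc_consistent_unaligned(scn, size, &paddr,
 *						   ring_type, &is_prealloc);
 *	...
 *	hif_mem_free_consistent_unaligned(scn, size, vaddr, paddr,
 *					  memctx, is_prealloc);
 */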
2809*5113495bSYour Name 
2810*5113495bSYour Name void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2811*5113495bSYour Name 				  qdf_size_t elem_size, uint16_t elem_num,
2812*5113495bSYour Name 				  struct qdf_mem_multi_page_t *pages,
2813*5113495bSYour Name 				  bool cacheable)
2814*5113495bSYour Name {
2815*5113495bSYour Name 	struct hif_driver_state_callbacks *cbk =
2816*5113495bSYour Name 			hif_get_callbacks_handle(scn);
2817*5113495bSYour Name 
2818*5113495bSYour Name 	if (cbk && cbk->prealloc_get_multi_pages)
2819*5113495bSYour Name 		cbk->prealloc_get_multi_pages(desc_type, elem_size, elem_num,
2820*5113495bSYour Name 					      pages, cacheable);
2821*5113495bSYour Name 
2822*5113495bSYour Name 	if (!pages->num_pages)
2823*5113495bSYour Name 		qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
2824*5113495bSYour Name 					  elem_size, elem_num, 0, cacheable);
2825*5113495bSYour Name }
2826*5113495bSYour Name 
2827*5113495bSYour Name void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
2828*5113495bSYour Name 				  struct qdf_mem_multi_page_t *pages,
2829*5113495bSYour Name 				  bool cacheable)
2830*5113495bSYour Name {
2831*5113495bSYour Name 	struct hif_driver_state_callbacks *cbk =
2832*5113495bSYour Name 			hif_get_callbacks_handle(scn);
2833*5113495bSYour Name 
2834*5113495bSYour Name 	if (cbk && cbk->prealloc_put_multi_pages &&
2835*5113495bSYour Name 	    pages->is_mem_prealloc)
2836*5113495bSYour Name 		cbk->prealloc_put_multi_pages(desc_type, pages);
2837*5113495bSYour Name 
2838*5113495bSYour Name 	if (!pages->is_mem_prealloc)
2839*5113495bSYour Name 		qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
2840*5113495bSYour Name 					 cacheable);
2841*5113495bSYour Name }
2842*5113495bSYour Name #endif
2843*5113495bSYour Name 
2844*5113495bSYour Name /**
2845*5113495bSYour Name  * hif_batch_send() - API to access hif specific function
2846*5113495bSYour Name  * ce_batch_send.
2847*5113495bSYour Name  * @osc: HIF Context
2848*5113495bSYour Name  * @msdu: list of msdus to be sent
2849*5113495bSYour Name  * @transfer_id: transfer id
2850*5113495bSYour Name  * @len: downloaded length
2851*5113495bSYour Name  * @sendhead:
2852*5113495bSYour Name  *
2853*5113495bSYour Name  * Return: list of msds not sent
2854*5113495bSYour Name  * Return: list of msdus not sent
2855*5113495bSYour Name qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2856*5113495bSYour Name 		uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
2857*5113495bSYour Name {
2858*5113495bSYour Name 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2859*5113495bSYour Name 
2860*5113495bSYour Name 	if (!ce_tx_hdl)
2861*5113495bSYour Name 		return NULL;
2862*5113495bSYour Name 
2863*5113495bSYour Name 	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2864*5113495bSYour Name 			len, sendhead);
2865*5113495bSYour Name }
2866*5113495bSYour Name qdf_export_symbol(hif_batch_send);
2867*5113495bSYour Name 
2868*5113495bSYour Name /**
2869*5113495bSYour Name  * hif_update_tx_ring() - API to access hif specific function
2870*5113495bSYour Name  * ce_update_tx_ring.
2871*5113495bSYour Name  * @osc: HIF Context
2872*5113495bSYour Name  * @num_htt_cmpls: number of htt compl received.
2873*5113495bSYour Name  *
2874*5113495bSYour Name  * Return: void
2875*5113495bSYour Name  */
2876*5113495bSYour Name void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
2877*5113495bSYour Name {
2878*5113495bSYour Name 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2879*5113495bSYour Name 
2880*5113495bSYour Name 	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
2881*5113495bSYour Name }
2882*5113495bSYour Name qdf_export_symbol(hif_update_tx_ring);
2883*5113495bSYour Name 
2884*5113495bSYour Name 
2885*5113495bSYour Name /**
2886*5113495bSYour Name  * hif_send_single() - API to access hif specific function
2887*5113495bSYour Name  * ce_send_single.
2888*5113495bSYour Name  * @osc: HIF Context
2889*5113495bSYour Name  * @msdu: msdu to be sent
2890*5113495bSYour Name  * @transfer_id: transfer id
2891*5113495bSYour Name  * @len: downloaded length
2892*5113495bSYour Name  *
2893*5113495bSYour Name  * Return: msdu sent status
2894*5113495bSYour Name  */
2895*5113495bSYour Name QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2896*5113495bSYour Name 			   uint32_t transfer_id, u_int32_t len)
2897*5113495bSYour Name {
2898*5113495bSYour Name 	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
2899*5113495bSYour Name 
2900*5113495bSYour Name 	if (!ce_tx_hdl)
2901*5113495bSYour Name 		return QDF_STATUS_E_NULL_VALUE;
2902*5113495bSYour Name 
2903*5113495bSYour Name 	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
2904*5113495bSYour Name 			len);
2905*5113495bSYour Name }
2906*5113495bSYour Name qdf_export_symbol(hif_send_single);
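/*
 * Caller-side sketch (illustrative): hif_send_single() returns a
 * QDF_STATUS, and on failure the caller typically still owns the nbuf and
 * frees or requeues it. 'msdu', 'transfer_id' and 'len' are placeholders.
 *
 *	if (QDF_IS_STATUS_ERROR(hif_send_single(osc, msdu, transfer_id, len)))
 *		qdf_nbuf_free(msdu);
 */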
2907*5113495bSYour Name #endif
2908*5113495bSYour Name 
2909*5113495bSYour Name /**
2910*5113495bSYour Name  * hif_reg_write() - API to access hif specific function
2911*5113495bSYour Name  * hif_write32_mb.
2912*5113495bSYour Name  * @hif_ctx: HIF Context
2913*5113495bSYour Name  * @offset: offset on which value has to be written
2914*5113495bSYour Name  * @value: value to be written
2915*5113495bSYour Name  *
2916*5113495bSYour Name  * Return: None
2917*5113495bSYour Name  */
2918*5113495bSYour Name void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
2919*5113495bSYour Name 		uint32_t value)
2920*5113495bSYour Name {
2921*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2922*5113495bSYour Name 
2923*5113495bSYour Name 	hif_write32_mb(scn, scn->mem + offset, value);
2924*5113495bSYour Name 
2925*5113495bSYour Name }
2926*5113495bSYour Name qdf_export_symbol(hif_reg_write);
2927*5113495bSYour Name 
2928*5113495bSYour Name /**
2929*5113495bSYour Name  * hif_reg_read() - API to access hif specific function
2930*5113495bSYour Name  * hif_read32_mb.
2931*5113495bSYour Name  * @hif_ctx: HIF Context
2932*5113495bSYour Name  * @offset: offset from which value has to be read
2933*5113495bSYour Name  *
2934*5113495bSYour Name  * Return: Read value
2935*5113495bSYour Name  */
2936*5113495bSYour Name uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
2937*5113495bSYour Name {
2938*5113495bSYour Name 
2939*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2940*5113495bSYour Name 
2941*5113495bSYour Name 	return hif_read32_mb(scn, scn->mem + offset);
2942*5113495bSYour Name }
2943*5113495bSYour Name qdf_export_symbol(hif_reg_read);
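/*
 * Combined usage sketch: a read-modify-write of a target register through
 * the two accessors above. 'REG_OFFSET' and 'BIT_MASK' are placeholder
 * names, not real register definitions.
 *
 *	uint32_t val = hif_reg_read(hif_ctx, REG_OFFSET);
 *
 *	hif_reg_write(hif_ctx, REG_OFFSET, val | BIT_MASK);
 */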
2944*5113495bSYour Name 
2945*5113495bSYour Name /**
2946*5113495bSYour Name  * hif_ramdump_handler(): generic ramdump handler
2947*5113495bSYour Name  * @scn: struct hif_opaque_softc
2948*5113495bSYour Name  *
2949*5113495bSYour Name  * Return: None
2950*5113495bSYour Name  */
2951*5113495bSYour Name void hif_ramdump_handler(struct hif_opaque_softc *scn)
2952*5113495bSYour Name {
2953*5113495bSYour Name 	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
2954*5113495bSYour Name 		hif_usb_ramdump_handler(scn);
2955*5113495bSYour Name }
2956*5113495bSYour Name 
2957*5113495bSYour Name hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
2958*5113495bSYour Name {
2959*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2960*5113495bSYour Name 
2961*5113495bSYour Name 	return scn->wake_irq_type;
2962*5113495bSYour Name }
2963*5113495bSYour Name 
2964*5113495bSYour Name irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
2965*5113495bSYour Name {
2966*5113495bSYour Name 	struct hif_softc *scn = context;
2967*5113495bSYour Name 
2968*5113495bSYour Name 	hif_info("wake interrupt received on irq %d", irq);
2969*5113495bSYour Name 
2970*5113495bSYour Name 	hif_rtpm_set_monitor_wake_intr(0);
2971*5113495bSYour Name 	hif_rtpm_request_resume();
2972*5113495bSYour Name 
2973*5113495bSYour Name 	if (scn->initial_wakeup_cb)
2974*5113495bSYour Name 		scn->initial_wakeup_cb(scn->initial_wakeup_priv);
2975*5113495bSYour Name 
2976*5113495bSYour Name 	if (hif_is_ut_suspended(scn))
2977*5113495bSYour Name 		hif_ut_fw_resume(scn);
2978*5113495bSYour Name 
2979*5113495bSYour Name 	qdf_pm_system_wakeup();
2980*5113495bSYour Name 
2981*5113495bSYour Name 	return IRQ_HANDLED;
2982*5113495bSYour Name }
2983*5113495bSYour Name 
2984*5113495bSYour Name void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2985*5113495bSYour Name 			       void (*callback)(void *),
2986*5113495bSYour Name 			       void *priv)
2987*5113495bSYour Name {
2988*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
2989*5113495bSYour Name 
2990*5113495bSYour Name 	scn->initial_wakeup_cb = callback;
2991*5113495bSYour Name 	scn->initial_wakeup_priv = priv;
2992*5113495bSYour Name }
2993*5113495bSYour Name 
2994*5113495bSYour Name void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2995*5113495bSYour Name 				       uint32_t ce_service_max_yield_time)
2996*5113495bSYour Name {
2997*5113495bSYour Name 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
2998*5113495bSYour Name 
2999*5113495bSYour Name 	hif_ctx->ce_service_max_yield_time =
3000*5113495bSYour Name 		ce_service_max_yield_time * 1000;
3001*5113495bSYour Name }
3002*5113495bSYour Name 
3003*5113495bSYour Name unsigned long long
3004*5113495bSYour Name hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
3005*5113495bSYour Name {
3006*5113495bSYour Name 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3007*5113495bSYour Name 
3008*5113495bSYour Name 	return hif_ctx->ce_service_max_yield_time;
3009*5113495bSYour Name }
3010*5113495bSYour Name 
3011*5113495bSYour Name void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
3012*5113495bSYour Name 				       uint8_t ce_service_max_rx_ind_flush)
3013*5113495bSYour Name {
3014*5113495bSYour Name 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3015*5113495bSYour Name 
3016*5113495bSYour Name 	if (ce_service_max_rx_ind_flush == 0 ||
3017*5113495bSYour Name 	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
3018*5113495bSYour Name 		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
3019*5113495bSYour Name 	else
3020*5113495bSYour Name 		hif_ctx->ce_service_max_rx_ind_flush =
3021*5113495bSYour Name 						ce_service_max_rx_ind_flush;
3022*5113495bSYour Name }
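/*
 * Configuration sketch: the yield-time setter scales its argument by 1000
 * before storing it (see above), and the rx-ind flush count falls back to
 * MSG_FLUSH_NUM when it is 0 or out of range. The *_from_ini names below
 * are placeholders for values read from configuration.
 *
 *	hif_set_ce_service_max_yield_time(hif, yield_time_from_ini);
 *	hif_set_ce_service_max_rx_ind_flush(hif, rx_ind_flush_from_ini);
 */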
3023*5113495bSYour Name 
3024*5113495bSYour Name #ifdef SYSTEM_PM_CHECK
3025*5113495bSYour Name void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
3026*5113495bSYour Name 			       enum hif_system_pm_state state)
3027*5113495bSYour Name {
3028*5113495bSYour Name 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3029*5113495bSYour Name 
3030*5113495bSYour Name 	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
3031*5113495bSYour Name }
3032*5113495bSYour Name 
3033*5113495bSYour Name int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
3034*5113495bSYour Name {
3035*5113495bSYour Name 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3036*5113495bSYour Name 
3037*5113495bSYour Name 	return qdf_atomic_read(&hif_ctx->sys_pm_state);
3038*5113495bSYour Name }
3039*5113495bSYour Name 
3040*5113495bSYour Name int hif_system_pm_state_check(struct hif_opaque_softc *hif)
3041*5113495bSYour Name {
3042*5113495bSYour Name 	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
3043*5113495bSYour Name 	int32_t sys_pm_state;
3044*5113495bSYour Name 
3045*5113495bSYour Name 	if (!hif_ctx) {
3046*5113495bSYour Name 		hif_err("hif context is null");
3047*5113495bSYour Name 		return -EFAULT;
3048*5113495bSYour Name 	}
3049*5113495bSYour Name 
3050*5113495bSYour Name 	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
3051*5113495bSYour Name 	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
3052*5113495bSYour Name 	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
3053*5113495bSYour Name 		hif_info("Triggering system wakeup");
3054*5113495bSYour Name 		qdf_pm_system_wakeup();
3055*5113495bSYour Name 		return -EAGAIN;
3056*5113495bSYour Name 	}
3057*5113495bSYour Name 
3058*5113495bSYour Name 	return 0;
3059*5113495bSYour Name }
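/*
 * Caller sketch (illustrative): bus access paths can gate on the system PM
 * state; -EAGAIN here means a system wakeup has been triggered and the
 * access should be retried once the bus resumes.
 *
 *	if (hif_system_pm_state_check(hif) == -EAGAIN)
 *		return QDF_STATUS_E_AGAIN;
 */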
3060*5113495bSYour Name #endif
3061*5113495bSYour Name #ifdef WLAN_FEATURE_AFFINITY_MGR
3062*5113495bSYour Name /*
3063*5113495bSYour Name  * hif_audio_cpu_affinity_allowed() - Check if audio cpu affinity allowed
3064*5113495bSYour Name  *
3065*5113495bSYour Name  * @scn: hif handle
3066*5113495bSYour Name  * @cfg: hif affinity manager configuration for IRQ
3067*5113495bSYour Name  * @audio_taken_cpu: Current CPUs which are taken by audio.
3068*5113495bSYour Name  * @current_time: Current system time.
3069*5113495bSYour Name  *
3070*5113495bSYour Name  * This API checks for 2 conditions
3071*5113495bSYour Name  *  1) Last audio taken mask and current taken mask are different
3072*5113495bSYour Name  *  2) Time since the IRQ was last affined away due to audio taken CPUs
3073*5113495bSYour Name  *     exceeds the time threshold (5 seconds in the current case).
3074*5113495bSYour Name  * Return true only if both conditions are satisfied.
3075*5113495bSYour Name  *
3076*5113495bSYour Name  * Return: bool: true if it is allowed to affine away audio taken cpus.
3077*5113495bSYour Name  */
3078*5113495bSYour Name static inline bool
3079*5113495bSYour Name hif_audio_cpu_affinity_allowed(struct hif_softc *scn,
3080*5113495bSYour Name 			       struct hif_cpu_affinity *cfg,
3081*5113495bSYour Name 			       qdf_cpu_mask audio_taken_cpu,
3082*5113495bSYour Name 			       uint64_t current_time)
3083*5113495bSYour Name {
3084*5113495bSYour Name 	if (!qdf_cpumask_equal(&audio_taken_cpu, &cfg->walt_taken_mask) &&
3085*5113495bSYour Name 	    (qdf_log_timestamp_to_usecs(current_time -
3086*5113495bSYour Name 			 cfg->last_affined_away)
3087*5113495bSYour Name 		< scn->time_threshold))
3088*5113495bSYour Name 		return false;
3089*5113495bSYour Name 	return true;
3090*5113495bSYour Name }
3091*5113495bSYour Name 
3092*5113495bSYour Name /*
3093*5113495bSYour Name  * hif_affinity_mgr_check_update_mask() - Check if cpu mask need to be updated
3094*5113495bSYour Name  *
3095*5113495bSYour Name  * @scn: hif handle
3096*5113495bSYour Name  * @cfg: hif affinity manager configuration for IRQ
3097*5113495bSYour Name  * @audio_taken_cpu: Current CPUs which are taken by audio.
3098*5113495bSYour Name  * @cpu_mask: CPU mask which need to be updated.
3099*5113495bSYour Name  * @current_time: Current system time.
3100*5113495bSYour Name  *
3101*5113495bSYour Name  * This API checks if the Pro audio use case is running and if cpu_mask
3102*5113495bSYour Name  * needs to be updated
3103*5113495bSYour Name  *
3104*5113495bSYour Name  * Return: QDF_STATUS
3105*5113495bSYour Name  */
3106*5113495bSYour Name static inline QDF_STATUS
3107*5113495bSYour Name hif_affinity_mgr_check_update_mask(struct hif_softc *scn,
3108*5113495bSYour Name 				   struct hif_cpu_affinity *cfg,
3109*5113495bSYour Name 				   qdf_cpu_mask audio_taken_cpu,
3110*5113495bSYour Name 				   qdf_cpu_mask *cpu_mask,
3111*5113495bSYour Name 				   uint64_t current_time)
3112*5113495bSYour Name {
3113*5113495bSYour Name 	qdf_cpu_mask allowed_mask;
3114*5113495bSYour Name 
3115*5113495bSYour Name 	/*
3116*5113495bSYour Name 	 * Case 1: audio_taken_mask is empty
3117*5113495bSYour Name 	 *   Check if the passed cpu_mask and wlan_requested_mask are the same.
3118*5113495bSYour Name 	 *      If the two masks differ, copy wlan_requested_mask (IRQ affinity
3119*5113495bSYour Name 	 *      mask requested by WLAN) to cpu_mask.
3120*5113495bSYour Name 	 *
3121*5113495bSYour Name 	 * Case 2: audio_taken_mask is not empty
3122*5113495bSYour Name 	 *   1. Only allow update if last time when IRQ was affined away due to
3123*5113495bSYour Name 	 *      audio taken CPUs is more than 5 seconds or update is requested
3124*5113495bSYour Name 	 *      by WLAN
3125*5113495bSYour Name 	 *   2. Only allow silver cores to be affined away.
3126*5113495bSYour Name 	 *   3. Check if any allowed CPUs for audio use case is set in cpu_mask.
3127*5113495bSYour Name 	 *       i. If any CPU mask is set, mask out that CPU from the cpu_mask
3128*5113495bSYour Name 	 *       ii. If after masking out audio taken cpu(Silver cores) cpu_mask
3129*5113495bSYour Name 	 *           is empty, set mask to all cpu except cpus taken by audio.
3130*5113495bSYour Name 	 * Example:
3131*5113495bSYour Name 	 *| Audio mask | mask allowed | cpu_mask | WLAN req mask | new cpu_mask|
3132*5113495bSYour Name 	 *|  0x00      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3133*5113495bSYour Name 	 *|  0x00      |       0x00   |   0x03   |       0x03    |      0x03   |
3134*5113495bSYour Name 	 *|  0x00      |       0x00   |   0xFC   |       0x03    |      0x03   |
3135*5113495bSYour Name 	 *|  0x00      |       0x00   |   0x03   |       0x0C    |      0x0C   |
3136*5113495bSYour Name 	 *|  0x0F      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3137*5113495bSYour Name 	 *|  0x0F      |       0x03   |   0x03   |       0x03    |      0xFC   |
3138*5113495bSYour Name 	 *|  0x03      |       0x03   |   0x0C   |       0x0C    |      0x0C   |
3139*5113495bSYour Name 	 *|  0x03      |       0x03   |   0x03   |       0x03    |      0xFC   |
3140*5113495bSYour Name 	 *|  0x03      |       0x03   |   0xFC   |       0x03    |      0xFC   |
3141*5113495bSYour Name 	 *|  0xF0      |       0x00   |   0x0C   |       0x0C    |      0x0C   |
3142*5113495bSYour Name 	 *|  0xF0      |       0x00   |   0x03   |       0x03    |      0x03   |
3143*5113495bSYour Name 	 */
3144*5113495bSYour Name 
3145*5113495bSYour Name 	/* Check if audio taken mask is empty */
3146*5113495bSYour Name 	if (qdf_likely(qdf_cpumask_empty(&audio_taken_cpu))) {
3147*5113495bSYour Name 		/* If the CPU mask requested by WLAN for the IRQ differs from
3148*5113495bSYour Name 		 * the currently set cpu_mask passed in, copy the requested
3149*5113495bSYour Name 		 * mask into cpu_mask and return.
3150*5113495bSYour Name 		 */
3151*5113495bSYour Name 		if (qdf_unlikely(!qdf_cpumask_equal(cpu_mask,
3152*5113495bSYour Name 						    &cfg->wlan_requested_mask))) {
3153*5113495bSYour Name 			qdf_cpumask_copy(cpu_mask, &cfg->wlan_requested_mask);
3154*5113495bSYour Name 			return QDF_STATUS_SUCCESS;
3155*5113495bSYour Name 		}
3156*5113495bSYour Name 		return QDF_STATUS_E_ALREADY;
3157*5113495bSYour Name 	}
3158*5113495bSYour Name 
3159*5113495bSYour Name 	if (!(hif_audio_cpu_affinity_allowed(scn, cfg, audio_taken_cpu,
3160*5113495bSYour Name 					     current_time) ||
3161*5113495bSYour Name 	      cfg->update_requested))
3162*5113495bSYour Name 		return QDF_STATUS_E_AGAIN;
3163*5113495bSYour Name 
3164*5113495bSYour Name 	/* Only allow Silver cores to be affined away */
3165*5113495bSYour Name 	qdf_cpumask_and(&allowed_mask, &scn->allowed_mask, &audio_taken_cpu);
3166*5113495bSYour Name 	if (qdf_cpumask_intersects(cpu_mask, &allowed_mask)) {
3167*5113495bSYour Name 		/* If any of taken CPU(Silver cores) mask is set in cpu_mask,
3168*5113495bSYour Name 		 *  mask out the audio taken CPUs from the cpu_mask.
3169*5113495bSYour Name 		 */
3170*5113495bSYour Name 		qdf_cpumask_andnot(cpu_mask, &cfg->wlan_requested_mask,
3171*5113495bSYour Name 				   &allowed_mask);
3172*5113495bSYour Name 		/* If cpu_mask is empty set it to all CPUs
3173*5113495bSYour Name 		 * except taken by audio(Silver cores)
3174*5113495bSYour Name 		 */
3175*5113495bSYour Name 		if (qdf_unlikely(qdf_cpumask_empty(cpu_mask)))
3176*5113495bSYour Name 			qdf_cpumask_complement(cpu_mask, &allowed_mask);
3177*5113495bSYour Name 		return QDF_STATUS_SUCCESS;
3178*5113495bSYour Name 	}
3179*5113495bSYour Name 
3180*5113495bSYour Name 	return QDF_STATUS_E_ALREADY;
3181*5113495bSYour Name }
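/*
 * Worked example for one row of the table above: with audio holding the
 * silver cores (audio mask 0x03, allowed mask 0x03) and WLAN having
 * requested 0x03, the intersection with cpu_mask is non-empty; masking
 * the audio CPUs out of the request leaves an empty mask, so the IRQ is
 * moved to the complement 0xFC (all CPUs except the audio-taken cores).
 */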
3182*5113495bSYour Name 
3183*5113495bSYour Name static inline QDF_STATUS
3184*5113495bSYour Name hif_check_and_affine_irq(struct hif_softc *scn, struct hif_cpu_affinity *cfg,
3185*5113495bSYour Name 			 qdf_cpu_mask audio_taken_cpu, qdf_cpu_mask cpu_mask,
3186*5113495bSYour Name 			 uint64_t current_time)
3187*5113495bSYour Name {
3188*5113495bSYour Name 	QDF_STATUS status;
3189*5113495bSYour Name 
3190*5113495bSYour Name 	status = hif_affinity_mgr_check_update_mask(scn, cfg,
3191*5113495bSYour Name 						    audio_taken_cpu,
3192*5113495bSYour Name 						    &cpu_mask,
3193*5113495bSYour Name 						    current_time);
3194*5113495bSYour Name 	/* Set IRQ affinity if CPU mask was updated */
3195*5113495bSYour Name 	if (QDF_IS_STATUS_SUCCESS(status)) {
3196*5113495bSYour Name 		status = hif_irq_set_affinity_hint(cfg->irq,
3197*5113495bSYour Name 						   &cpu_mask);
3198*5113495bSYour Name 		if (QDF_IS_STATUS_SUCCESS(status)) {
3199*5113495bSYour Name 			/* Store audio taken CPU mask */
3200*5113495bSYour Name 			qdf_cpumask_copy(&cfg->walt_taken_mask,
3201*5113495bSYour Name 					 &audio_taken_cpu);
3202*5113495bSYour Name 			/* Store CPU mask which was set for IRQ*/
3203*5113495bSYour Name 			qdf_cpumask_copy(&cfg->current_irq_mask,
3204*5113495bSYour Name 					 &cpu_mask);
3205*5113495bSYour Name 			/* Set time when IRQ affinity was updated */
3206*5113495bSYour Name 			cfg->last_updated = current_time;
3207*5113495bSYour Name 			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3208*5113495bSYour Name 							   audio_taken_cpu,
3209*5113495bSYour Name 							   current_time))
3210*5113495bSYour Name 				/* If CPU mask was updated due to CPU
3211*5113495bSYour Name 				 * taken by audio, update
3212*5113495bSYour Name 				 * last_affined_away time
3213*5113495bSYour Name 				 */
3214*5113495bSYour Name 				cfg->last_affined_away = current_time;
3215*5113495bSYour Name 		}
3216*5113495bSYour Name 	}
3217*5113495bSYour Name 
3218*5113495bSYour Name 	return status;
3219*5113495bSYour Name }
3220*5113495bSYour Name 
3221*5113495bSYour Name void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
3222*5113495bSYour Name {
3223*5113495bSYour Name 	bool audio_affinity_allowed = false;
3224*5113495bSYour Name 	int i, j, ce_id;
3225*5113495bSYour Name 	uint64_t current_time;
3226*5113495bSYour Name 	char cpu_str[10];
3227*5113495bSYour Name 	QDF_STATUS status;
3228*5113495bSYour Name 	qdf_cpu_mask cpu_mask, audio_taken_cpu;
3229*5113495bSYour Name 	struct HIF_CE_state *hif_state;
3230*5113495bSYour Name 	struct hif_exec_context *hif_ext_group;
3231*5113495bSYour Name 	struct CE_attr *host_ce_conf;
3232*5113495bSYour Name 	struct HIF_CE_state *ce_sc;
3233*5113495bSYour Name 	struct hif_cpu_affinity *cfg;
3234*5113495bSYour Name 
3235*5113495bSYour Name 	if (!scn->affinity_mgr_supported)
3236*5113495bSYour Name 		return;
3237*5113495bSYour Name 
3238*5113495bSYour Name 	current_time = hif_get_log_timestamp();
3239*5113495bSYour Name 	/* Get CPU mask for audio taken CPUs */
3240*5113495bSYour Name 	audio_taken_cpu = qdf_walt_get_cpus_taken();
3241*5113495bSYour Name 
3242*5113495bSYour Name 	ce_sc = HIF_GET_CE_STATE(scn);
3243*5113495bSYour Name 	host_ce_conf = ce_sc->host_ce_config;
3244*5113495bSYour Name 	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
3245*5113495bSYour Name 		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
3246*5113495bSYour Name 			continue;
3247*5113495bSYour Name 		cfg = &scn->ce_irq_cpu_mask[ce_id];
3248*5113495bSYour Name 		qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3249*5113495bSYour Name 		status =
3250*5113495bSYour Name 			hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3251*5113495bSYour Name 						 cpu_mask, current_time);
3252*5113495bSYour Name 		if (QDF_IS_STATUS_SUCCESS(status))
3253*5113495bSYour Name 			audio_affinity_allowed = true;
3254*5113495bSYour Name 	}
3255*5113495bSYour Name 
3256*5113495bSYour Name 	hif_state = HIF_GET_CE_STATE(scn);
3257*5113495bSYour Name 	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
3258*5113495bSYour Name 		hif_ext_group = hif_state->hif_ext_group[i];
3259*5113495bSYour Name 		for (j = 0; j < hif_ext_group->numirq; j++) {
3260*5113495bSYour Name 			cfg = &scn->irq_cpu_mask[hif_ext_group->grp_id][j];
3261*5113495bSYour Name 			qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
3262*5113495bSYour Name 			status =
3263*5113495bSYour Name 				hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
3264*5113495bSYour Name 							 cpu_mask, current_time);
3265*5113495bSYour Name 			if (QDF_IS_STATUS_SUCCESS(status)) {
3266*5113495bSYour Name 				qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
3267*5113495bSYour Name 				audio_affinity_allowed = true;
3268*5113495bSYour Name 			}
3269*5113495bSYour Name 		}
3270*5113495bSYour Name 	}
3271*5113495bSYour Name 	if (audio_affinity_allowed) {
3272*5113495bSYour Name 		qdf_thread_cpumap_print_to_pagebuf(false, cpu_str,
3273*5113495bSYour Name 						   &audio_taken_cpu);
3274*5113495bSYour Name 		hif_info("Audio taken CPU mask: %s", cpu_str);
3275*5113495bSYour Name 	}
3276*5113495bSYour Name }
3277*5113495bSYour Name 
3278*5113495bSYour Name static inline QDF_STATUS
3279*5113495bSYour Name hif_affinity_mgr_set_irq_affinity(struct hif_softc *scn, uint32_t irq,
3280*5113495bSYour Name 				  struct hif_cpu_affinity *cfg,
3281*5113495bSYour Name 				  qdf_cpu_mask *cpu_mask)
3282*5113495bSYour Name {
3283*5113495bSYour Name 	uint64_t current_time;
3284*5113495bSYour Name 	char cpu_str[10];
3285*5113495bSYour Name 	QDF_STATUS status, mask_updated;
3286*5113495bSYour Name 	qdf_cpu_mask audio_taken_cpu = qdf_walt_get_cpus_taken();
3287*5113495bSYour Name 
3288*5113495bSYour Name 	current_time = hif_get_log_timestamp();
3289*5113495bSYour Name 	qdf_cpumask_copy(&cfg->wlan_requested_mask, cpu_mask);
3290*5113495bSYour Name 	cfg->update_requested = true;
3291*5113495bSYour Name 	mask_updated = hif_affinity_mgr_check_update_mask(scn, cfg,
3292*5113495bSYour Name 							  audio_taken_cpu,
3293*5113495bSYour Name 							  cpu_mask,
3294*5113495bSYour Name 							  current_time);
3295*5113495bSYour Name 	status = hif_irq_set_affinity_hint(irq, cpu_mask);
3296*5113495bSYour Name 	if (QDF_IS_STATUS_SUCCESS(status)) {
3297*5113495bSYour Name 		qdf_cpumask_copy(&cfg->walt_taken_mask, &audio_taken_cpu);
3298*5113495bSYour Name 		qdf_cpumask_copy(&cfg->current_irq_mask, cpu_mask);
3299*5113495bSYour Name 		if (QDF_IS_STATUS_SUCCESS(mask_updated)) {
3300*5113495bSYour Name 			cfg->last_updated = current_time;
3301*5113495bSYour Name 			if (hif_audio_cpu_affinity_allowed(scn, cfg,
3302*5113495bSYour Name 							   audio_taken_cpu,
3303*5113495bSYour Name 							   current_time)) {
3304*5113495bSYour Name 				cfg->last_affined_away = current_time;
3305*5113495bSYour Name 				qdf_thread_cpumap_print_to_pagebuf(false,
3306*5113495bSYour Name 								   cpu_str,
3307*5113495bSYour Name 								   &audio_taken_cpu);
3308*5113495bSYour Name 				hif_info_rl("Audio taken CPU mask: %s",
3309*5113495bSYour Name 					    cpu_str);
3310*5113495bSYour Name 			}
3311*5113495bSYour Name 		}
3312*5113495bSYour Name 	}
3313*5113495bSYour Name 	cfg->update_requested = false;
3314*5113495bSYour Name 	return status;
3315*5113495bSYour Name }
3316*5113495bSYour Name 
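/*
 * hif_affinity_mgr_set_qrg_irq_affinity() - set affinity for an ext-group IRQ
 * @scn: HIF context
 * @irq: IRQ number
 * @grp_id: ext group id
 * @irq_index: index of the IRQ within the group
 * @cpu_mask: requested CPU mask
 *
 * Apply the affinity hint directly when the affinity manager is not
 * supported; otherwise route the request through the per-IRQ bookkeeping in
 * scn->irq_cpu_mask[grp_id][irq_index].
 */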
3317*5113495bSYour Name QDF_STATUS
3318*5113495bSYour Name hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
3319*5113495bSYour Name 				      uint32_t grp_id, uint32_t irq_index,
3320*5113495bSYour Name 				      qdf_cpu_mask *cpu_mask)
3321*5113495bSYour Name {
3322*5113495bSYour Name 	struct hif_cpu_affinity *cfg;
3323*5113495bSYour Name 
3324*5113495bSYour Name 	if (!scn->affinity_mgr_supported)
3325*5113495bSYour Name 		return hif_irq_set_affinity_hint(irq, cpu_mask);
3326*5113495bSYour Name 
3327*5113495bSYour Name 	cfg = &scn->irq_cpu_mask[grp_id][irq_index];
3328*5113495bSYour Name 	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3329*5113495bSYour Name }
3330*5113495bSYour Name 
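/*
 * hif_affinity_mgr_set_ce_irq_affinity() - set affinity for a CE IRQ
 * @scn: HIF context
 * @irq: IRQ number
 * @ce_id: copy engine id
 * @cpu_mask: requested CPU mask
 *
 * Same flow as hif_affinity_mgr_set_qrg_irq_affinity(), but uses the per-CE
 * bookkeeping in scn->ce_irq_cpu_mask[ce_id].
 */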
3331*5113495bSYour Name QDF_STATUS
3332*5113495bSYour Name hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
3333*5113495bSYour Name 				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
3334*5113495bSYour Name {
3335*5113495bSYour Name 	struct hif_cpu_affinity *cfg;
3336*5113495bSYour Name 
3337*5113495bSYour Name 	if (!scn->affinity_mgr_supported)
3338*5113495bSYour Name 		return hif_irq_set_affinity_hint(irq, cpu_mask);
3339*5113495bSYour Name 
3340*5113495bSYour Name 	cfg = &scn->ce_irq_cpu_mask[ce_id];
3341*5113495bSYour Name 	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
3342*5113495bSYour Name }
3343*5113495bSYour Name 
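/*
 * hif_affinity_mgr_init_ce_irq() - initialize affinity state for a CE IRQ
 * @scn: HIF context
 * @id: copy engine id
 * @irq: IRQ number
 *
 * Seed the per-CE affinity bookkeeping with a default mask covering the
 * little (Silver) cluster and clear the update timestamps.
 */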
3344*5113495bSYour Name void
3345*5113495bSYour Name hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
3346*5113495bSYour Name {
3347*5113495bSYour Name 	unsigned int cpus;
3348*5113495bSYour Name 	qdf_cpu_mask cpu_mask = {0};
3349*5113495bSYour Name 	struct hif_cpu_affinity *cfg = NULL;
3350*5113495bSYour Name 
3351*5113495bSYour Name 	if (!scn->affinity_mgr_supported)
3352*5113495bSYour Name 		return;
3353*5113495bSYour Name 
3354*5113495bSYour Name 	/* Set CPU Mask to Silver core */
3355*5113495bSYour Name 	qdf_for_each_possible_cpu(cpus)
3356*5113495bSYour Name 		if (qdf_topology_physical_package_id(cpus) ==
3357*5113495bSYour Name 		    CPU_CLUSTER_TYPE_LITTLE)
3358*5113495bSYour Name 			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3359*5113495bSYour Name 
3360*5113495bSYour Name 	cfg = &scn->ce_irq_cpu_mask[id];
3361*5113495bSYour Name 	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3362*5113495bSYour Name 	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3363*5113495bSYour Name 	cfg->irq = irq;
3364*5113495bSYour Name 	cfg->last_updated = 0;
3365*5113495bSYour Name 	cfg->last_affined_away = 0;
3366*5113495bSYour Name 	cfg->update_requested = false;
3367*5113495bSYour Name }
3368*5113495bSYour Name 
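/*
 * hif_affinity_mgr_init_grp_irq() - initialize affinity state for a group IRQ
 * @scn: HIF context
 * @grp_id: ext group id
 * @irq_num: index of the IRQ within the group
 * @irq: IRQ number
 *
 * Same defaults as hif_affinity_mgr_init_ce_irq(), stored in
 * scn->irq_cpu_mask[grp_id][irq_num].
 */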
3369*5113495bSYour Name void
3370*5113495bSYour Name hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
3371*5113495bSYour Name 			      int irq_num, int irq)
3372*5113495bSYour Name {
3373*5113495bSYour Name 	unsigned int cpus;
3374*5113495bSYour Name 	qdf_cpu_mask cpu_mask = {0};
3375*5113495bSYour Name 	struct hif_cpu_affinity *cfg = NULL;
3376*5113495bSYour Name 
3377*5113495bSYour Name 	if (!scn->affinity_mgr_supported)
3378*5113495bSYour Name 		return;
3379*5113495bSYour Name 
3380*5113495bSYour Name 	/* Set CPU Mask to Silver core */
3381*5113495bSYour Name 	qdf_for_each_possible_cpu(cpus)
3382*5113495bSYour Name 		if (qdf_topology_physical_package_id(cpus) ==
3383*5113495bSYour Name 		    CPU_CLUSTER_TYPE_LITTLE)
3384*5113495bSYour Name 			qdf_cpumask_set_cpu(cpus, &cpu_mask);
3385*5113495bSYour Name 
3386*5113495bSYour Name 	cfg = &scn->irq_cpu_mask[grp_id][irq_num];
3387*5113495bSYour Name 	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
3388*5113495bSYour Name 	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
3389*5113495bSYour Name 	cfg->irq = irq;
3390*5113495bSYour Name 	cfg->last_updated = 0;
3391*5113495bSYour Name 	cfg->last_affined_away = 0;
3392*5113495bSYour Name 	cfg->update_requested = false;
3393*5113495bSYour Name }
3394*5113495bSYour Name #endif
3395*5113495bSYour Name 
3396*5113495bSYour Name #if defined(HIF_CPU_PERF_AFFINE_MASK) || \
3397*5113495bSYour Name 	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
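/*
 * hif_config_irq_set_perf_affinity_hint() - exported wrapper that applies the
 * configured performance CPU affinity for the HIF IRQs by calling
 * hif_config_irq_affinity() on the underlying hif_softc.
 */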
3398*5113495bSYour Name void hif_config_irq_set_perf_affinity_hint(
3399*5113495bSYour Name 	struct hif_opaque_softc *hif_ctx)
3400*5113495bSYour Name {
3401*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
3402*5113495bSYour Name 
3403*5113495bSYour Name 	hif_config_irq_affinity(scn);
3404*5113495bSYour Name }
3405*5113495bSYour Name 
3406*5113495bSYour Name qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
3407*5113495bSYour Name #endif
3408