/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: hif_napi.c
 *
 * HIF NAPI interface implementation
 */

#include <linux/string.h> /* memset */

/* Linux headers */
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#ifdef CONFIG_SCHED_CORE_CTL
#include <linux/sched/core_ctl.h>
#endif
#include <pld_common.h>
#include <linux/pm.h>

/* Driver headers */
#include <hif_napi.h>
#include <hif_debug.h>
#include <hif_io32.h>
#include <ce_api.h>
#include <ce_internal.h>
#include <hif_irq_affinity.h>
#include "qdf_cpuhp.h"
#include "qdf_module.h"
#include "qdf_net_if.h"
#include "qdf_dev.h"
#include "qdf_irq.h"

enum napi_decision_vector {
	HIF_NAPI_NOEVENT = 0,
	HIF_NAPI_INITED  = 1,
	HIF_NAPI_CONF_UP = 2
};
#define ENABLE_NAPI_MASK (HIF_NAPI_INITED | HIF_NAPI_CONF_UP)
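
/*
 * Illustrative note (not part of the original source): NAPI is fully
 * enabled only when both bits of the decision vector are set, i.e.
 * napid->state == ENABLE_NAPI_MASK. A hypothetical caller-side check:
 *
 *	static bool napi_fully_up(struct qca_napi_data *napid)
 *	{
 *		return (napid->state & ENABLE_NAPI_MASK) == ENABLE_NAPI_MASK;
 *	}
 */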

#ifdef RECEIVE_OFFLOAD
/**
 * hif_rxthread_napi_poll() - dummy napi poll for rx_thread NAPI
 * @napi: Rx_thread NAPI
 * @budget: NAPI BUDGET
 *
 * Return: 0 as it is not supposed to be polled at all as it is not scheduled.
 */
static int hif_rxthread_napi_poll(struct napi_struct *napi, int budget)
{
	hif_err("This napi_poll should not be polled as we don't schedule it");
	QDF_ASSERT(0);
	return 0;
}

/**
 * hif_init_rx_thread_napi() - Initialize dummy Rx_thread NAPI
 * @napii: Handle to napi_info holding rx_thread napi
 *
 * Return: None
 */
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
	struct qdf_net_if *nd = (struct qdf_net_if *)&napii->rx_thread_netdev;

	qdf_net_if_create_dummy_if(nd);
	qdf_netif_napi_add(&napii->rx_thread_netdev, &napii->rx_thread_napi,
			   hif_rxthread_napi_poll, 64);
	qdf_napi_enable(&napii->rx_thread_napi);
}

/**
 * hif_deinit_rx_thread_napi() - Deinitialize dummy Rx_thread NAPI
 * @napii: Handle to napi_info holding rx_thread napi
 *
 * Return: None
 */
static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
{
	qdf_netif_napi_del(&napii->rx_thread_napi);
}
#else /* RECEIVE_OFFLOAD */
static void hif_init_rx_thread_napi(struct qca_napi_info *napii)
{
}

static void hif_deinit_rx_thread_napi(struct qca_napi_info *napii)
{
}
#endif

/**
 * hif_napi_create() - creates the NAPI structures for a given CE
 * @hif_ctx: pointer to hif context
 * @poll: poll function to be used for this NAPI instance
 * @budget: budget to be registered with the NAPI instance
 * @scale: scale factor on the weight (to scale the budget to 1000)
 * @flags: feature flags
 *
 * Description:
 *    Creates NAPI instances. This function is called
 *    unconditionally during initialization. It creates
 *    napi structures through the proper HTC/HIF calls.
 *    The structures are disabled on creation.
 *    Note that for each NAPI instance a separate dummy netdev is used.
 *
 * Return:
 * < 0: error
 * = 0: <should never happen>
 * > 0: id of the created object (for multi-NAPI, number of objects created)
 */
int hif_napi_create(struct hif_opaque_softc   *hif_ctx,
		    int (*poll)(struct napi_struct *, int),
		    int                budget,
		    int                scale,
		    uint8_t            flags)
{
	int i;
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;
	struct CE_state      *ce_state;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	int    rc = 0;

	NAPI_DEBUG("-->(budget=%d, scale=%d)",
		   budget, scale);
	NAPI_DEBUG("hif->napi_data.state = 0x%08x",
		   hif->napi_data.state);
	NAPI_DEBUG("hif->napi_data.ce_map = 0x%08x",
		   hif->napi_data.ce_map);

	napid = &(hif->napi_data);
	if (0 == (napid->state & HIF_NAPI_INITED)) {
		memset(napid, 0, sizeof(struct qca_napi_data));
		qdf_spinlock_create(&(napid->lock));

		napid->state |= HIF_NAPI_INITED;
		napid->flags = flags;

		rc = hif_napi_cpu_init(hif_ctx);
		if (rc != 0 && rc != -EALREADY) {
			hif_err("NAPI_initialization failed(rc=%d)", rc);
			rc = napid->ce_map;
			goto hnc_err;
		} else
			rc = 0;

		hif_debug("NAPI structures initialized, rc=%d", rc);
	}
	for (i = 0; i < hif->ce_count; i++) {
		ce_state = hif->ce_id_to_state[i];
		NAPI_DEBUG("ce %d: htt_rx=%d htt_tx=%d",
			   i, ce_state->htt_rx_data,
			   ce_state->htt_tx_data);
		if (ce_srng_based(hif))
			continue;

		if (!ce_state->htt_rx_data)
			continue;

		/* Now this is a CE where we need NAPI on */
		NAPI_DEBUG("Creating NAPI on pipe %d", i);
		napii = qdf_mem_malloc(sizeof(*napii));
		napid->napis[i] = napii;
		if (!napii) {
			rc = -ENOMEM;
			goto napii_free;
		}
	}

	for (i = 0; i < hif->ce_count; i++) {
		napii = napid->napis[i];
		if (!napii)
			continue;

		NAPI_DEBUG("initializing NAPI for pipe %d", i);
		memset(napii, 0, sizeof(struct qca_napi_info));
		napii->scale = scale;
		napii->id    = NAPI_PIPE2ID(i);
		napii->hif_ctx = hif_ctx;
		napii->irq   = pld_get_irq(hif->qdf_dev->dev, i);

		if (napii->irq < 0)
			hif_warn("bad IRQ value for CE %d: %d", i, napii->irq);

		qdf_net_if_create_dummy_if((struct qdf_net_if *)&napii->netdev);

		NAPI_DEBUG("adding napi=%pK to netdev=%pK (poll=%pK, bdgt=%d)",
			   &(napii->napi), &(napii->netdev), poll, budget);
		qdf_netif_napi_add(&(napii->netdev), &(napii->napi),
				   poll, budget);

		NAPI_DEBUG("after napi_add");
		NAPI_DEBUG("napi=0x%pK, netdev=0x%pK",
			   &(napii->napi), &(napii->netdev));
		NAPI_DEBUG("napi.dev_list.prev=0x%pK, next=0x%pK",
			   napii->napi.dev_list.prev,
			   napii->napi.dev_list.next);
		NAPI_DEBUG("dev.napi_list.prev=0x%pK, next=0x%pK",
			   napii->netdev.napi_list.prev,
			   napii->netdev.napi_list.next);

		hif_init_rx_thread_napi(napii);
		napii->lro_ctx = qdf_lro_init();
		NAPI_DEBUG("Registering LRO for ce_id %d NAPI callback for %d lro_ctx %pK\n",
			   i, napii->id, napii->lro_ctx);

		/* It is OK to change the state variable below without
		 * protection as there should be no-one around yet
		 */
		napid->ce_map |= (0x01 << i);
		hif_debug("NAPI id %d created for pipe %d", napii->id, i);
	}

	/* no ces registered with the napi */
	if (!ce_srng_based(hif) && napid->ce_map == 0) {
		hif_warn("no napis created for copy engines");
		rc = -EFAULT;
		goto napii_free;
	}

	NAPI_DEBUG("napi map = %x", napid->ce_map);
	NAPI_DEBUG("NAPI ids created for all applicable pipes");
	return napid->ce_map;

napii_free:
	for (i = 0; i < hif->ce_count; i++) {
		napii = napid->napis[i];
		napid->napis[i] = NULL;
		if (napii)
			qdf_mem_free(napii);
	}

hnc_err:
	NAPI_DEBUG("<--napi_instances_map=%x]", napid->ce_map);
	return rc;
}
qdf_export_symbol(hif_napi_create);
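
/*
 * Illustrative usage sketch (not part of the original source): an
 * attach path would create the instances once and treat the returned
 * CE map as the set of NAPI-enabled pipes. hdd_napi_poll and the
 * budget/scale values are hypothetical placeholders:
 *
 *	int map = hif_napi_create(hif_ctx, hdd_napi_poll, 64, 1, 0);
 *
 *	if (map < 0)
 *		hif_err("napi create failed: %d", map);
 */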

#ifdef RECEIVE_OFFLOAD
void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
					 void (offld_flush_handler)(void *))
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	if (!scn) {
		hif_err("hif_state NULL!");
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state && (ce_state->htt_rx_data)) {
			napii = napid->napis[i];
			napii->offld_flush_cb = offld_flush_handler;
			hif_debug("Registering offload for ce_id %d NAPI callback for %d flush_cb %pK",
				  i, napii->id, napii->offld_flush_cb);
		}
	}
}

void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl)
{
	int i;
	struct CE_state *ce_state;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	if (!scn) {
		hif_err("hif_state NULL!");
		QDF_ASSERT(0);
		return;
	}

	napid = hif_napi_get_all(hif_hdl);
	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (ce_state && (ce_state->htt_rx_data)) {
			napii = napid->napis[i];
			hif_debug("deRegistering offld for ce_id %d NAPI callback for %d flush_cb %pK",
				  i, napii->id, napii->offld_flush_cb);
			/* Not required */
			napii->offld_flush_cb = NULL;
		}
	}
}
#endif /* RECEIVE_OFFLOAD */
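
/*
 * Illustrative sketch (not part of the original source): an rx-offload
 * consumer registers a flush handler once at init and deregisters it
 * on teardown; dp_rx_flush_cb is a hypothetical callback name:
 *
 *	hif_napi_rx_offld_flush_cb_register(hif_hdl, dp_rx_flush_cb);
 *	...
 *	hif_napi_rx_offld_flush_cb_deregister(hif_hdl);
 */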

/**
 * hif_napi_destroy() - destroys the NAPI structures for a given instance
 * @hif_ctx: pointer to hif context
 * @id: the CE id whose napi instance will be destroyed
 * @force: if set, will destroy even if entry is active (de-activates)
 *
 * Description:
 *    Destroy a given NAPI instance. This function is called
 *    unconditionally during cleanup.
 *    Refuses to destroy an entry if it is still enabled (unless force=1).
 *    Marks the whole napi_data invalid if all instances are destroyed.
 *
 * Return:
 * -EINVAL: specific entry has not been created
 * -EPERM : specific entry is still active
 * < 0    : error
 * = 0    : success
 */
int hif_napi_destroy(struct hif_opaque_softc *hif_ctx,
		     uint8_t          id,
		     int              force)
{
	uint8_t ce = NAPI_ID2PIPE(id);
	int rc = 0;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	NAPI_DEBUG("-->(id=%d, force=%d)", id, force);

	if (0 == (hif->napi_data.state & HIF_NAPI_INITED)) {
		hif_err("NAPI not initialized or entry %d not created", id);
		rc = -EINVAL;
	} else if (0 == (hif->napi_data.ce_map & (0x01 << ce))) {
		hif_err("NAPI instance %d (pipe %d) not created", id, ce);
		if (hif->napi_data.napis[ce])
			hif_err("memory allocated but ce_map not set %d (pipe %d)",
				id, ce);
		rc = -EINVAL;
	} else {
		struct qca_napi_data *napid;
		struct qca_napi_info *napii;

		napid = &(hif->napi_data);
		napii = napid->napis[ce];
		if (!napii) {
			if (napid->ce_map & (0x01 << ce))
				hif_err("napii & ce_map out of sync(ce %d)", ce);
			return -EINVAL;
		}

		if (hif->napi_data.state == HIF_NAPI_CONF_UP) {
			if (force) {
				qdf_napi_disable(&(napii->napi));
				hif_debug("NAPI entry %d force disabled", id);
				NAPI_DEBUG("NAPI %d force disabled", id);
			} else {
				hif_err("Cannot destroy active NAPI %d", id);
				rc = -EPERM;
			}
		}
		if (0 == rc) {
			NAPI_DEBUG("before napi_del");
			NAPI_DEBUG("napi.dlist.prv=0x%pK, next=0x%pK",
				   napii->napi.dev_list.prev,
				   napii->napi.dev_list.next);
			NAPI_DEBUG("dev.napi_l.prv=0x%pK, next=0x%pK",
				   napii->netdev.napi_list.prev,
				   napii->netdev.napi_list.next);

			qdf_lro_deinit(napii->lro_ctx);
			qdf_netif_napi_del(&(napii->napi));
			hif_deinit_rx_thread_napi(napii);

			napid->ce_map &= ~(0x01 << ce);
			napid->napis[ce] = NULL;
			napii->scale  = 0;
			qdf_mem_free(napii);
			hif_debug("NAPI %d destroyed", id);

			/* if there are no active instances and
			 * if they are all destroyed,
			 * set the whole structure to uninitialized state
			 */
			if (napid->ce_map == 0) {
				rc = hif_napi_cpu_deinit(hif_ctx);
				/* caller is tolerant to receiving !=0 rc */

				qdf_spinlock_destroy(&(napid->lock));
				memset(napid,
				       0, sizeof(struct qca_napi_data));
				hif_debug("no NAPI instances. Zapped");
			}
		}
	}

	return rc;
}
qdf_export_symbol(hif_napi_destroy);
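
/*
 * Illustrative teardown sketch (not part of the original source):
 * since ids map 1:1 to pipes via NAPI_PIPE2ID()/NAPI_ID2PIPE(), a
 * detach path could walk all pipes and destroy each created instance,
 * forcing de-activation if it is still enabled:
 *
 *	for (i = 0; i < CE_COUNT_MAX; i++)
 *		if (hif_napi_created(hif_ctx, i))
 *			hif_napi_destroy(hif_ctx, NAPI_PIPE2ID(i), 1);
 */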

#ifdef FEATURE_LRO
void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
	struct qca_napi_data *napid;
	struct qca_napi_info *napii;

	napid = &(scn->napi_data);
	napii = napid->napis[NAPI_ID2PIPE(napi_id)];

	if (napii)
		return napii->lro_ctx;
	return 0;
}
#endif

/**
 * hif_napi_get_all() - returns the address of the whole HIF NAPI structure
 * @hif_ctx: pointer to hif context
 *
 * Description:
 *    Returns the address of the whole structure
 *
 * Return:
 *  <addr>: address of the whole HIF NAPI structure
 */
inline struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	return &(hif->napi_data);
}

struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid)
{
	int id = NAPI_ID2PIPE(napi_id);

	return napid->napis[id];
}

/**
 * hif_napi_event() - reacts to events that impact NAPI
 * @hif_ctx: pointer to hif context
 * @event: event that has been detected
 * @data: more data regarding the event
 *
 * Description:
 *   This function handles two types of events:
 *   1- Events that change the state of NAPI (enabled/disabled):
 *      {NAPI_EVT_INI_FILE, NAPI_EVT_CMD_STATE}
 *      The state is retrievable by "hdd_napi_enabled(-1)"
 *    - NAPI will be on if either INI file is on and it has not been disabled
 *                                by a subsequent vendor CMD,
 *                         or     it has been enabled by a vendor CMD.
 *   2- Events that change the CPU affinity of a NAPI instance/IRQ:
 *      {NAPI_EVT_TPUT_STATE, NAPI_EVT_CPU_STATE}
 *    - NAPI will support a throughput mode (HI/LO), kept at napid->napi_mode
 *    - NAPI will switch throughput mode based on hdd_napi_throughput_policy()
 *    - In LO tput mode, NAPI will yield control of its interrupts to the
 *      system management functions. However in HI throughput mode, NAPI will
 *      actively manage its interrupts/instances (by trying to disperse them
 *      out to separate performance cores).
 *    - CPU eligibility is kept up-to-date by NAPI_EVT_CPU_STATE events.
 *
 *    + In some cases (roaming peer management is the only case so far), a
 *      client can trigger a "SERIALIZE" event. Basically, this means that the
 *      user is asking NAPI to go into a truly single execution context state.
 *      So, NAPI indicates to msm-irqbalancer that it wants to be denylisted
 *      (if called for the first time) and then moves all IRQs (for NAPI
 *      instances) to be collapsed to a single core. If called multiple times,
 *      it will just re-collapse the CPUs. This is because the denylist-on()
 *      API is reference-counted, and because the API has already been called.
 *
 *      Such a user should call the "DESERIALIZE" (NORMAL) event, to set NAPI
 *      back to its "normal" operation. Optionally, they can give a timeout
 *      value (in multiples of BusBandwidthCheckPeriod -- 100 msecs by
 *      default). In this case, NAPI will just set the current throughput
 *      state to uninitialized and set the delay period. Once the policy
 *      handler is called, it would skip applying the policy delay period
 *      times, and otherwise apply the policy.
 *
 * Return:
 *  < 0: some error
 *  = 0: event handled successfully
 */
int hif_napi_event(struct hif_opaque_softc *hif_ctx, enum qca_napi_event event,
		   void *data)
{
	int      rc = 0;
	uint32_t prev_state;
	int      i;
	bool state_changed;
	struct napi_struct *napi;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_data *napid = &(hif->napi_data);
	enum qca_napi_tput_state tput_mode = QCA_NAPI_TPUT_UNINITIALIZED;
	enum {
		DENYLIST_NOT_PENDING,
		DENYLIST_ON_PENDING,
		DENYLIST_OFF_PENDING
	     } denylist_pending = DENYLIST_NOT_PENDING;

	NAPI_DEBUG("%s: -->(event=%d, aux=%pK)", __func__, event, data);

	if (ce_srng_based(hif))
		return hif_exec_event(hif_ctx, event, data);

	if ((napid->state & HIF_NAPI_INITED) == 0) {
		NAPI_DEBUG("%s: got event when NAPI not initialized",
			   __func__);
		return -EINVAL;
	}
	qdf_spin_lock_bh(&(napid->lock));
	prev_state = napid->state;
	switch (event) {
	case NAPI_EVT_INI_FILE:
	case NAPI_EVT_CMD_STATE:
	case NAPI_EVT_INT_STATE: {
		int on = (data != ((void *)0));

		hif_debug("recved evnt: STATE_CMD %d; v = %d (state=0x%0x)",
			  event, on, prev_state);
		if (on)
			if (prev_state & HIF_NAPI_CONF_UP) {
				hif_debug("Duplicate NAPI conf ON msg");
			} else {
				hif_debug("Setting state to ON");
				napid->state |= HIF_NAPI_CONF_UP;
			}
		else /* off request */
			if (prev_state & HIF_NAPI_CONF_UP) {
				hif_debug("Setting state to OFF");
				napid->state &= ~HIF_NAPI_CONF_UP;
			} else {
				hif_debug("Duplicate NAPI conf OFF msg");
			}
		break;
	}
	/* case NAPI_INIT_FILE/CMD_STATE */

	case NAPI_EVT_CPU_STATE: {
		int cpu = ((unsigned long int)data >> 16);
		int val = ((unsigned long int)data & 0x0ff);

		NAPI_DEBUG("%s: evt=CPU_STATE on CPU %d value=%d",
			   __func__, cpu, val);

		/* state has already been set by hnc_cpu_notify_cb */
		if ((val == QCA_NAPI_CPU_DOWN) &&
		    (napid->napi_mode == QCA_NAPI_TPUT_HI) && /* we manage */
		    (napid->napi_cpu[cpu].napis != 0)) {
			NAPI_DEBUG("%s: Migrating NAPIs out of cpu %d",
				   __func__, cpu);
			rc = hif_napi_cpu_migrate(napid,
						  cpu,
						  HNC_ACT_RELOCATE);
			napid->napi_cpu[cpu].napis = 0;
		}
		/* in QCA_NAPI_TPUT_LO case, napis MUST == 0 */
		break;
	}

	case NAPI_EVT_TPUT_STATE: {
		tput_mode = (enum qca_napi_tput_state)data;
		if (tput_mode == QCA_NAPI_TPUT_LO) {
			/* from TPUT_HI -> TPUT_LO */
			NAPI_DEBUG("%s: Moving to napi_tput_LO state",
				   __func__);
			denylist_pending = DENYLIST_OFF_PENDING;
			/*
			 * Ideally we should "collapse" interrupts here, since
			 * we are "dispersing" interrupts in the "else" case.
			 * This allows the possibility that our interrupts may
			 * still be on the perf cluster the next time we enter
			 * high tput mode. However, the irq_balancer is free
			 * to move our interrupts to power cluster once
			 * denylisting has been turned off in the "else" case.
			 */
		} else {
			/* from TPUT_LO -> TPUT->HI */
			NAPI_DEBUG("%s: Moving to napi_tput_HI state",
				   __func__);
			rc = hif_napi_cpu_migrate(napid,
						  HNC_ANY_CPU,
						  HNC_ACT_DISPERSE);

			denylist_pending = DENYLIST_ON_PENDING;
		}
		napid->napi_mode = tput_mode;
		break;
	}

	case NAPI_EVT_USR_SERIAL: {
		unsigned long users = (unsigned long)data;

		NAPI_DEBUG("%s: User forced SERIALIZATION; users=%ld",
			   __func__, users);

		rc = hif_napi_cpu_migrate(napid,
					  HNC_ANY_CPU,
					  HNC_ACT_COLLAPSE);
		if ((users == 0) && (rc == 0))
			denylist_pending = DENYLIST_ON_PENDING;
		break;
	}
	case NAPI_EVT_USR_NORMAL: {
		NAPI_DEBUG("%s: User forced DE-SERIALIZATION", __func__);
		if (!napid->user_cpu_affin_mask)
			denylist_pending = DENYLIST_OFF_PENDING;
		/*
		 * Deserialization timeout is handled at hdd layer;
		 * just mark current mode to uninitialized to ensure
		 * it will be set when the delay is over
		 */
		napid->napi_mode = QCA_NAPI_TPUT_UNINITIALIZED;
		break;
	}
	default: {
		hif_err("Unknown event: %d (data=0x%0lx)",
			event, (unsigned long) data);
		break;
	} /* default */
	}; /* switch */

	switch (denylist_pending) {
	case DENYLIST_ON_PENDING:
		/* assume the control of WLAN IRQs */
		hif_napi_cpu_denylist(napid, DENYLIST_ON);
		break;
	case DENYLIST_OFF_PENDING:
		/* yield the control of WLAN IRQs */
		hif_napi_cpu_denylist(napid, DENYLIST_OFF);
		break;
	default: /* nothing to do */
		break;
	} /* switch denylist_pending */

	/* we want to perform the comparison under the lock:
	 * there is a possibility of hif_napi_event getting called
	 * from two different contexts (driver unload and cpu hotplug
	 * notification), and napid->state getting changed
	 * in the driver unload context can lead to a race condition
	 * in the cpu hotplug context. Therefore, perform the napid->state
	 * comparison before releasing the lock.
	 */
	state_changed = (prev_state != napid->state);
	qdf_spin_unlock_bh(&(napid->lock));

	if (state_changed) {
		if (napid->state == ENABLE_NAPI_MASK) {
			rc = 1;
			for (i = 0; i < CE_COUNT_MAX; i++) {
				struct qca_napi_info *napii = napid->napis[i];

				if (napii) {
					napi = &(napii->napi);
					NAPI_DEBUG("%s: enabling NAPI %d",
						   __func__, i);
					qdf_napi_enable(napi);
				}
			}
		} else {
			rc = 0;
			for (i = 0; i < CE_COUNT_MAX; i++) {
				struct qca_napi_info *napii = napid->napis[i];

				if (napii) {
					napi = &(napii->napi);
					NAPI_DEBUG("%s: disabling NAPI %d",
						   __func__, i);
					qdf_napi_disable(napi);
					/* in case it is affined, remove it */
					qdf_dev_set_irq_affinity(napii->irq,
								 NULL);
				}
			}
		}
	} else {
		hif_debug("no change in hif napi state (still %d)", prev_state);
	}

	NAPI_DEBUG("<--[rc=%d]", rc);
	return rc;
}
qdf_export_symbol(hif_napi_event);
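
/*
 * Illustrative sketch (not part of the original source): the upper
 * layer's bus-bandwidth monitor feeds throughput transitions into this
 * handler with the new state encoded directly in the data pointer:
 *
 *	hif_napi_event(hif_ctx, NAPI_EVT_TPUT_STATE,
 *		       (void *)QCA_NAPI_TPUT_HI);
 *
 * and for NAPI_EVT_CPU_STATE the cpu id rides in the upper bits,
 * e.g. (void *)((cpu << 16) | QCA_NAPI_CPU_DOWN), matching the
 * decoding in the handler above.
 */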

/**
 * hif_napi_enabled() - checks whether NAPI is enabled for given ce or not
 * @hif_ctx: hif context
 * @ce: CE instance (or -1, to check if any CEs are enabled)
 *
 * Return: bool
 */
int hif_napi_enabled(struct hif_opaque_softc *hif_ctx, int ce)
{
	int rc;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	if (-1 == ce)
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK));
	else
		rc = ((hif->napi_data.state == ENABLE_NAPI_MASK) &&
		      (hif->napi_data.ce_map & (0x01 << ce)));
	return rc;
}
qdf_export_symbol(hif_napi_enabled);

/**
 * hif_napi_created() - checks whether NAPI is created for given ce or not
 * @hif_ctx: hif context
 * @ce: CE instance
 *
 * Return: bool
 */
bool hif_napi_created(struct hif_opaque_softc *hif_ctx, int ce)
{
	int rc;
	struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);

	rc = (hif->napi_data.ce_map & (0x01 << ce));

	return !!rc;
}
qdf_export_symbol(hif_napi_created);

/**
 * hif_napi_enable_irq() - enables bus interrupts after napi_complete
 *
 * @hif: hif context
 * @id: id of NAPI instance calling this (used to determine the CE)
 *
 * Return: void
 */
inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);

	hif_irq_enable(scn, NAPI_ID2PIPE(id));
}

#if defined(QCA_WIFI_WCN6450) && defined(HIF_LATENCY_PROFILE_ENABLE)
/*
 * hif_napi_latency_profile_start() - update the schedule start timestamp
 *
 * @scn: HIF context
 * @ce_id: Copy engine id
 *
 * Return: None
 */
static inline void hif_napi_latency_profile_start(struct hif_softc *scn,
						  int ce_id)
{
	struct qca_napi_info *napii;

	napii = scn->napi_data.napis[ce_id];
	if (napii)
		napii->tstamp = qdf_ktime_to_ms(qdf_ktime_get());
}

/*
 * hif_napi_latency_profile_measure() - calculate the NAPI schedule latency
 * and update histogram
 *
 * @napi_info: pointer to qca_napi_info for the napi instance
 *
 * Return: None
 */
static void hif_napi_latency_profile_measure(struct qca_napi_info *napi_info)
{
	int64_t cur_tstamp;
	int64_t time_elapsed;

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	if (cur_tstamp > napi_info->tstamp)
		time_elapsed = (cur_tstamp - napi_info->tstamp);
	else
		time_elapsed = ~0x0 - (napi_info->tstamp - cur_tstamp);

	napi_info->tstamp = cur_tstamp;

	if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_0_2)
		napi_info->sched_latency_stats[0]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_3_10)
		napi_info->sched_latency_stats[1]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_11_20)
		napi_info->sched_latency_stats[2]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_21_50)
		napi_info->sched_latency_stats[3]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_51_100)
		napi_info->sched_latency_stats[4]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_101_250)
		napi_info->sched_latency_stats[5]++;
	else if (time_elapsed <= HIF_SCHED_LATENCY_BUCKET_251_500)
		napi_info->sched_latency_stats[6]++;
	else
		napi_info->sched_latency_stats[7]++;
}
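
/*
 * Worked example (illustrative): if a NAPI instance was scheduled at
 * t=1000 ms and its poll runs at t=1012 ms, time_elapsed is 12 ms,
 * which falls into the "11-20 ms" bucket, so sched_latency_stats[2]
 * is incremented. The else branch above is intended to cover the rare
 * case where the millisecond timestamp wraps between the two samples.
 */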

static void hif_print_napi_latency_stats(struct qca_napi_info *napii, int ce_id)
{
	int i;
	int64_t cur_tstamp;

	const char time_str[HIF_SCHED_LATENCY_BUCKETS][15] =  {
		"0-2   ms",
		"3-10  ms",
		"11-20 ms",
		"21-50 ms",
		"51-100 ms",
		"101-250 ms",
		"251-500 ms",
		"> 500 ms"
	};

	cur_tstamp = qdf_ktime_to_ms(qdf_ktime_get());

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
		  "Current timestamp: %lld", cur_tstamp);

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
		  "ce id %d Last serviced timestamp: %lld",
		  ce_id, napii->tstamp);

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
		  "Latency Bucket     | Time elapsed");

	for (i = 0; i < HIF_SCHED_LATENCY_BUCKETS; i++)
		QDF_TRACE(QDF_MODULE_ID_HIF,
			  QDF_TRACE_LEVEL_INFO_HIGH,
			  "%s     |    %lld",
			  time_str[i],
			  napii->sched_latency_stats[i]);
}
#else
static inline void
hif_napi_latency_profile_start(struct hif_softc *scn, int ce_id)
{
}

static inline void
hif_napi_latency_profile_measure(struct qca_napi_info *napi_info)
{
}

static inline void
hif_print_napi_latency_stats(struct qca_napi_info *napii, int ce_id)
{
}
#endif

#ifdef QCA_WIFI_WCN6450
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
/**
 * hif_napi_update_service_start_time() - Update NAPI poll start time
 *
 * @napi_info: per NAPI instance data structure
 *
 * The function is called at the beginning of a NAPI poll to record the poll
 * start time.
 *
 * Return: None
 */
static inline void
hif_napi_update_service_start_time(struct qca_napi_info *napi_info)
{
	napi_info->poll_start_time = qdf_time_sched_clock();
}

/**
 * hif_napi_fill_poll_time_histogram() - fills poll time histogram for a NAPI
 *
 * @napi_info: per NAPI instance data structure
 *
 * The function is called at the end of a NAPI poll to calculate poll time
 * buckets.
 *
 * Return: void
 */
static void hif_napi_fill_poll_time_histogram(struct qca_napi_info *napi_info)
{
	struct qca_napi_stat *napi_stat;
	unsigned long long poll_time_ns;
	uint32_t poll_time_us;
	uint32_t bucket_size_us = 500;
	uint32_t bucket;
	uint32_t cpu_id = qdf_get_cpu();

	poll_time_ns = qdf_time_sched_clock() - napi_info->poll_start_time;
	poll_time_us = qdf_do_div(poll_time_ns, 1000);

	napi_stat = &napi_info->stats[cpu_id];
	if (poll_time_ns > napi_info->stats[cpu_id].napi_max_poll_time)
		napi_info->stats[cpu_id].napi_max_poll_time = poll_time_ns;

	bucket = poll_time_us / bucket_size_us;
	if (bucket >= QCA_NAPI_NUM_BUCKETS)
		bucket = QCA_NAPI_NUM_BUCKETS - 1;

	++napi_stat->poll_time_buckets[bucket];
}
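
/*
 * Worked example (illustrative): a poll that takes 1.3 ms gives
 * poll_time_us = 1300, so bucket = 1300 / 500 = 2 (the 1000-1499 us
 * bucket); any poll at or beyond QCA_NAPI_NUM_BUCKETS * 500 us is
 * clamped into the last bucket.
 */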

/*
 * hif_get_poll_times_hist_str() - Get HIF poll times histogram string
 * @stats: NAPI stats to get poll time buckets
 * @buf: buffer to fill histogram string
 * @buf_len: length of the buffer
 *
 * Return: void
 */
static void hif_get_poll_times_hist_str(struct qca_napi_stat *stats, char *buf,
					uint8_t buf_len)
{
	int i;
	int str_index = 0;

	for (i = 0; i < QCA_NAPI_NUM_BUCKETS; i++)
		str_index += qdf_scnprintf(buf + str_index, buf_len - str_index,
					   "%u|", stats->poll_time_buckets[i]);
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_info *napii;
	struct qca_napi_stat *napi_stats;
	int ce_id, cpu;

	/*
	 * Max value of uint_32 (poll_time_bucket) = 4294967295
	 * Thus we need 10 chars + 1 space = 11 chars for each bucket value,
	 * +1 for the terminating '\0'.
	 */
	char hist_str[(QCA_NAPI_NUM_BUCKETS * 11) + 1] = {'\0'};

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO_HIGH,
		  "NAPI[#]CPU[#] |scheds |polls  |comps  |dones  |t-lim  |max(us)|hist(500us buckets)");

	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
		if (!hif_napi_enabled(hif_ctx, ce_id))
			continue;

		napii = scn->napi_data.napis[ce_id];
		if (napii) {
			for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
				napi_stats = &napii->stats[cpu];

				hif_get_poll_times_hist_str(napi_stats,
							    hist_str,
							    sizeof(hist_str));

				if (napi_stats->napi_schedules != 0)
					QDF_TRACE(QDF_MODULE_ID_HIF,
						  QDF_TRACE_LEVEL_INFO_HIGH,
						  "NAPI[%d]CPU[%d]: %7u %7u %7u %7u %7u %7llu %s",
						  ce_id, cpu,
						  napi_stats->napi_schedules,
						  napi_stats->napi_polls,
						  napi_stats->napi_completes,
						  napi_stats->napi_workdone,
						  napi_stats->time_limit_reached,
						  qdf_do_div(napi_stats->napi_max_poll_time, 1000),
						  hist_str);
			}

			hif_print_napi_latency_stats(napii, ce_id);
		}
	}
}
#else
static inline void
hif_napi_update_service_start_time(struct qca_napi_info *napi_info)
{
}

static inline void
hif_napi_fill_poll_time_histogram(struct qca_napi_info *napi_info)
{
}

void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_info *napii;
	struct qca_napi_stat *napi_stats;
	int ce_id, cpu;

	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_FATAL,
		  "NAPI[#ctx]CPU[#] |schedules |polls |completes |workdone");

	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
		if (!hif_napi_enabled(hif_ctx, ce_id))
			continue;

		napii = scn->napi_data.napis[ce_id];
		if (napii) {
			for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
				napi_stats = &napii->stats[cpu];

				if (napi_stats->napi_schedules != 0)
					QDF_TRACE(QDF_MODULE_ID_HIF,
						  QDF_TRACE_LEVEL_FATAL,
						  "NAPI[%2d]CPU[%d]: "
						  "%7d %7d %7d %7d ",
						  ce_id, cpu,
						  napi_stats->napi_schedules,
						  napi_stats->napi_polls,
						  napi_stats->napi_completes,
						  napi_stats->napi_workdone);
			}

			hif_print_napi_latency_stats(napii, ce_id);
		}
	}
}
#endif

#ifdef HIF_LATENCY_PROFILE_ENABLE
void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct qca_napi_info *napii;
	int ce_id;

	for (ce_id = 0; ce_id < CE_COUNT_MAX; ce_id++) {
		if (!hif_napi_enabled(hif_ctx, ce_id))
			continue;

		napii = scn->napi_data.napis[ce_id];
		if (napii)
			qdf_mem_set(napii->sched_latency_stats,
				    sizeof(napii->sched_latency_stats), 0);
	}
}
#else
inline void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx)
{
}
#endif /* HIF_LATENCY_PROFILE_ENABLE */

#else
static inline void
hif_napi_update_service_start_time(struct qca_napi_info *napi_info)
{
}

static inline void
hif_napi_fill_poll_time_histogram(struct qca_napi_info *napi_info)
{
}
#endif

/**
 * hif_napi_schedule() - schedules napi, updates stats
 * @hif_ctx:  hif context
 * @ce_id: index of napi instance
 *
 * Return: false if napi is not enabled or is already scheduled, otherwise true
 */
hif_napi_schedule(struct hif_opaque_softc * hif_ctx,int ce_id)1071*5113495bSYour Name bool hif_napi_schedule(struct hif_opaque_softc *hif_ctx, int ce_id)
1072*5113495bSYour Name {
1073*5113495bSYour Name 	int cpu = smp_processor_id();
1074*5113495bSYour Name 	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
1075*5113495bSYour Name 	struct qca_napi_info *napii;
1076*5113495bSYour Name 
1077*5113495bSYour Name 	napii = scn->napi_data.napis[ce_id];
1078*5113495bSYour Name 	if (qdf_unlikely(!napii)) {
1079*5113495bSYour Name 		hif_err("scheduling unallocated napi (ce:%d)", ce_id);
1080*5113495bSYour Name 		qdf_atomic_dec(&scn->active_tasklet_cnt);
1081*5113495bSYour Name 		return false;
1082*5113495bSYour Name 	}
1083*5113495bSYour Name 
1084*5113495bSYour Name 	if (test_bit(NAPI_STATE_SCHED, &napii->napi.state)) {
1085*5113495bSYour Name 		NAPI_DEBUG("napi scheduled, return");
1086*5113495bSYour Name 		qdf_atomic_dec(&scn->active_tasklet_cnt);
1087*5113495bSYour Name 		return false;
1088*5113495bSYour Name 	}
1089*5113495bSYour Name 
1090*5113495bSYour Name 	hif_record_ce_desc_event(scn,  ce_id, NAPI_SCHEDULE,
1091*5113495bSYour Name 				 NULL, NULL, 0, 0);
1092*5113495bSYour Name 	napii->stats[cpu].napi_schedules++;
1093*5113495bSYour Name 	NAPI_DEBUG("scheduling napi %d (ce:%d)", napii->id, ce_id);
1094*5113495bSYour Name 	hif_napi_latency_profile_start(scn, ce_id);
1095*5113495bSYour Name 	napi_schedule(&(napii->napi));
1096*5113495bSYour Name 
1097*5113495bSYour Name 	return true;
1098*5113495bSYour Name }
1099*5113495bSYour Name qdf_export_symbol(hif_napi_schedule);
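
/*
 * Illustrative sketch (not part of this driver): a CE interrupt handler
 * would typically hand the engine over to NAPI roughly as below. The
 * handler name and the hard-coded CE id are hypothetical. Note that
 * hif_napi_schedule() decrements scn->active_tasklet_cnt on its failure
 * paths, which implies the real caller has incremented it beforehand;
 * that step is omitted here for brevity.
 *
 *	static irqreturn_t ce_irq_handler_sketch(int irq, void *context)
 *	{
 *		struct hif_opaque_softc *hif_ctx = context;
 *		int ce_id = 5;
 *
 *		if (!hif_napi_schedule(hif_ctx, ce_id))
 *			hif_napi_enable_irq(hif_ctx, ce_id);
 *		return IRQ_HANDLED;
 *	}
 */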
1100*5113495bSYour Name 
1101*5113495bSYour Name /**
1102*5113495bSYour Name  * hif_napi_correct_cpu() - correct the interrupt affinity for napi if needed
1103*5113495bSYour Name  * @napi_info: pointer to qca_napi_info for the napi instance
1104*5113495bSYour Name  *
1105*5113495bSYour Name  * Return: true  => interrupt already on correct cpu, no correction needed
1106*5113495bSYour Name  *         false => interrupt was on the wrong cpu; its affinity has been
1107*5113495bSYour Name  *                  corrected
1108*5113495bSYour Name  */
1109*5113495bSYour Name static inline
hif_napi_correct_cpu(struct qca_napi_info * napi_info)1110*5113495bSYour Name bool hif_napi_correct_cpu(struct qca_napi_info *napi_info)
1111*5113495bSYour Name {
1112*5113495bSYour Name 	bool right_cpu = true;
1113*5113495bSYour Name 	int rc = 0;
1114*5113495bSYour Name 	int cpu;
1115*5113495bSYour Name 	struct qca_napi_data *napid;
1116*5113495bSYour Name 	QDF_STATUS ret;
1117*5113495bSYour Name 
1118*5113495bSYour Name 	napid = hif_napi_get_all(GET_HIF_OPAQUE_HDL(napi_info->hif_ctx));
1119*5113495bSYour Name 
1120*5113495bSYour Name 	if (napid->flags & QCA_NAPI_FEATURE_CPU_CORRECTION) {
1121*5113495bSYour Name 
1122*5113495bSYour Name 		cpu = qdf_get_cpu();
1123*5113495bSYour Name 		if (unlikely((hif_napi_cpu_denylist(napid,
1124*5113495bSYour Name 						    DENYLIST_QUERY) > 0) &&
1125*5113495bSYour Name 			     cpu != napi_info->cpu)) {
1126*5113495bSYour Name 			right_cpu = false;
1127*5113495bSYour Name 
1128*5113495bSYour Name 			NAPI_DEBUG("interrupt on wrong CPU, correcting");
1129*5113495bSYour Name 			napi_info->cpumask.bits[0] = (0x01 << napi_info->cpu);
1130*5113495bSYour Name 
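			/*
			 * The QDF_IRQ_NO_BALANCING status bit is toggled
			 * around the affinity update so that the irq
			 * balancer does not override the mask chosen here.
			 */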
1131*5113495bSYour Name 			qdf_dev_modify_irq_status(napi_info->irq,
1132*5113495bSYour Name 						  QDF_IRQ_NO_BALANCING, 0);
1133*5113495bSYour Name 			ret = qdf_dev_set_irq_affinity(napi_info->irq,
1134*5113495bSYour Name 						       (struct qdf_cpu_mask *)
1135*5113495bSYour Name 						       &napi_info->cpumask);
1136*5113495bSYour Name 			rc = qdf_status_to_os_return(ret);
1137*5113495bSYour Name 			qdf_dev_modify_irq_status(napi_info->irq, 0,
1138*5113495bSYour Name 						  QDF_IRQ_NO_BALANCING);
1139*5113495bSYour Name 
1140*5113495bSYour Name 			if (rc)
1141*5113495bSYour Name 				hif_err("Setting irq affinity hint failed: %d", rc);
1142*5113495bSYour Name 			else
1143*5113495bSYour Name 				napi_info->stats[cpu].cpu_corrected++;
1144*5113495bSYour Name 		}
1145*5113495bSYour Name 	}
1146*5113495bSYour Name 	return right_cpu;
1147*5113495bSYour Name }
1148*5113495bSYour Name 
1149*5113495bSYour Name #ifdef RECEIVE_OFFLOAD
1150*5113495bSYour Name /**
1151*5113495bSYour Name  * hif_napi_offld_flush_cb() - Call upper layer flush callback
1152*5113495bSYour Name  * @napi_info: Handle to hif_napi_info
1153*5113495bSYour Name  *
1154*5113495bSYour Name  * Return: None
1155*5113495bSYour Name  */
hif_napi_offld_flush_cb(struct qca_napi_info * napi_info)1156*5113495bSYour Name static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
1157*5113495bSYour Name {
1158*5113495bSYour Name 	if (napi_info->offld_flush_cb)
1159*5113495bSYour Name 		napi_info->offld_flush_cb(napi_info);
1160*5113495bSYour Name }
1161*5113495bSYour Name #else
hif_napi_offld_flush_cb(struct qca_napi_info * napi_info)1162*5113495bSYour Name static void hif_napi_offld_flush_cb(struct qca_napi_info *napi_info)
1163*5113495bSYour Name {
1164*5113495bSYour Name }
1165*5113495bSYour Name #endif
1166*5113495bSYour Name 
1167*5113495bSYour Name /**
1168*5113495bSYour Name  * hif_napi_poll() - NAPI poll routine
1169*5113495bSYour Name  * @hif_ctx: HIF context
1170*5113495bSYour Name  * @napi: pointer to NAPI struct as kernel holds it
1171*5113495bSYour Name  * @budget: the NAPI budget, i.e. the maximum amount of work to do per poll
1172*5113495bSYour Name  *
1173*5113495bSYour Name  * This is the body of the poll function.
1174*5113495bSYour Name  * The poll function is called by the kernel, so there is a wrapper
1175*5113495bSYour Name  * function in HDD which in turn calls this function.
1176*5113495bSYour Name  * Two main reasons why the whole thing is not implemented in HDD:
1177*5113495bSYour Name  * a) references to things like ce_service that HDD is not aware of
1178*5113495bSYour Name  * b) proximity to the implementation of ce_tasklet, which the body
1179*5113495bSYour Name  *    of this function should be very close to.
1180*5113495bSYour Name  *
1181*5113495bSYour Name  * NOTE TO THE MAINTAINER:
1182*5113495bSYour Name  *  Consider this function and ce_tasklet a very tightly coupled pair.
1183*5113495bSYour Name  *  Any changes to ce_tasklet or this function will likely need to be
1184*5113495bSYour Name  *  reflected in the counterpart.
1185*5113495bSYour Name  *
1186*5113495bSYour Name  * Return:
1187*5113495bSYour Name  *  int: the amount of work done in this poll (<= budget)
1188*5113495bSYour Name  */
hif_napi_poll(struct hif_opaque_softc * hif_ctx,struct napi_struct * napi,int budget)1189*5113495bSYour Name int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
1190*5113495bSYour Name 		  struct napi_struct *napi,
1191*5113495bSYour Name 		  int budget)
1192*5113495bSYour Name {
1193*5113495bSYour Name 	int    rc = 0; /* default: no work done, also takes care of error */
1194*5113495bSYour Name 	int    normalized = 0;
1195*5113495bSYour Name 	int    bucket;
1196*5113495bSYour Name 	int    cpu = smp_processor_id();
1197*5113495bSYour Name 	bool poll_on_right_cpu;
1198*5113495bSYour Name 	struct hif_softc      *hif = HIF_GET_SOFTC(hif_ctx);
1199*5113495bSYour Name 	struct qca_napi_info *napi_info;
1200*5113495bSYour Name 	struct CE_state *ce_state = NULL;
1201*5113495bSYour Name 
1202*5113495bSYour Name 	if (unlikely(!hif)) {
1203*5113495bSYour Name 		hif_err("hif context is NULL");
1204*5113495bSYour Name 		QDF_ASSERT(0);
1205*5113495bSYour Name 		goto out;
1206*5113495bSYour Name 	}
1207*5113495bSYour Name 
1208*5113495bSYour Name 	napi_info = (struct qca_napi_info *)
1209*5113495bSYour Name 		container_of(napi, struct qca_napi_info, napi);
1210*5113495bSYour Name 
1211*5113495bSYour Name 	hif_napi_update_service_start_time(napi_info);
1212*5113495bSYour Name 	hif_napi_latency_profile_measure(napi_info);
1213*5113495bSYour Name 
1214*5113495bSYour Name 	NAPI_DEBUG("%s -->(napi(%d, irq=%d), budget=%d)",
1215*5113495bSYour Name 		   __func__, napi_info->id, napi_info->irq, budget);
1216*5113495bSYour Name 
1217*5113495bSYour Name 	napi_info->stats[cpu].napi_polls++;
1218*5113495bSYour Name 
1219*5113495bSYour Name 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
1220*5113495bSYour Name 				 NAPI_POLL_ENTER, NULL, NULL, cpu, 0);
1221*5113495bSYour Name 
1222*5113495bSYour Name 	rc = ce_per_engine_service(hif, NAPI_ID2PIPE(napi_info->id));
1223*5113495bSYour Name 	NAPI_DEBUG("%s: ce_per_engine_service processed %d msgs",
1224*5113495bSYour Name 		    __func__, rc);
1225*5113495bSYour Name 
1226*5113495bSYour Name 	hif_napi_offld_flush_cb(napi_info);
1227*5113495bSYour Name 
1228*5113495bSYour Name 	/* do not return 0, if there was some work done,
1229*5113495bSYour Name 	 * even if it is below the scale
1230*5113495bSYour Name 	 */
1231*5113495bSYour Name 	if (rc) {
1232*5113495bSYour Name 		napi_info->stats[cpu].napi_workdone += rc;
1233*5113495bSYour Name 		normalized = (rc / napi_info->scale);
1234*5113495bSYour Name 		if (normalized == 0)
1235*5113495bSYour Name 			normalized++;
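		/*
		 * Illustrative arithmetic (the constants are build-time and
		 * may differ): with scale = 1, QCA_NAPI_BUDGET = 64 and
		 * QCA_NAPI_NUM_BUCKETS = 4, rc = 33 yields normalized = 33
		 * and bucket = (33 - 1) / (64 / 4) = 2.
		 */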
1236*5113495bSYour Name 		bucket = (normalized - 1) /
1237*5113495bSYour Name 				(QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
1238*5113495bSYour Name 		if (bucket >= QCA_NAPI_NUM_BUCKETS) {
1239*5113495bSYour Name 			bucket = QCA_NAPI_NUM_BUCKETS - 1;
1240*5113495bSYour Name 			hif_err("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)"
1241*5113495bSYour Name 				" normalized %d, napi budget %d",
1242*5113495bSYour Name 				bucket, QCA_NAPI_NUM_BUCKETS,
1243*5113495bSYour Name 				normalized, QCA_NAPI_BUDGET);
1244*5113495bSYour Name 		}
1245*5113495bSYour Name 		napi_info->stats[cpu].napi_budget_uses[bucket]++;
1246*5113495bSYour Name 	} else {
1247*5113495bSYour Name 		/* if ce_per_engine_service() reports 0, terminate the poll */
1248*5113495bSYour Name 		NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
1249*5113495bSYour Name 			   __func__, __LINE__);
1250*5113495bSYour Name 	}
1251*5113495bSYour Name 
1252*5113495bSYour Name 	ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
1253*5113495bSYour Name 
1254*5113495bSYour Name 	/*
1255*5113495bSYour Name 	 * hif_napi_correct_cpu() is deliberately not called inside the if
1256*5113495bSYour Name 	 * statement below: placed at the end of the condition, it would be
1257*5113495bSYour Name 	 * short-circuited whenever an earlier condition evaluates true. The
1258*5113495bSYour Name 	 * CPU correction check must kick in on every poll.
1259*5113495bSYour Name 	 */
1260*5113495bSYour Name #ifdef NAPI_YIELD_BUDGET_BASED
1261*5113495bSYour Name 	if (ce_state && (ce_state->force_break || 0 == rc)) {
1262*5113495bSYour Name #else
1263*5113495bSYour Name 	poll_on_right_cpu = hif_napi_correct_cpu(napi_info);
1264*5113495bSYour Name 	if ((ce_state) &&
1265*5113495bSYour Name 	    (!ce_check_rx_pending(ce_state) || (0 == rc) ||
1266*5113495bSYour Name 	     !poll_on_right_cpu)) {
1267*5113495bSYour Name #endif
1268*5113495bSYour Name 		napi_info->stats[cpu].napi_completes++;
1269*5113495bSYour Name #ifdef NAPI_YIELD_BUDGET_BASED
1270*5113495bSYour Name 		ce_state->force_break = 0;
1271*5113495bSYour Name #endif
1272*5113495bSYour Name 
1273*5113495bSYour Name 		hif_record_ce_desc_event(hif, ce_state->id, NAPI_COMPLETE,
1274*5113495bSYour Name 					 NULL, NULL, 0, 0);
1275*5113495bSYour Name 		if (normalized >= budget)
1276*5113495bSYour Name 			normalized = budget - 1;
1277*5113495bSYour Name 
1278*5113495bSYour Name 		napi_complete(napi);
1279*5113495bSYour Name 		/* enable interrupts */
1280*5113495bSYour Name 		hif_napi_enable_irq(hif_ctx, napi_info->id);
1281*5113495bSYour Name 		/* support suspend/resume */
1282*5113495bSYour Name 		qdf_atomic_dec(&(hif->active_tasklet_cnt));
1283*5113495bSYour Name 
1284*5113495bSYour Name 		NAPI_DEBUG("%s:%d: napi_complete + enabling the interrupts",
1285*5113495bSYour Name 			   __func__, __LINE__);
1286*5113495bSYour Name 	} else {
1287*5113495bSYour Name 		/* The 4.4 kernel NAPI implementation requires drivers either
1288*5113495bSYour Name 		 * to return the full budget when asking to be re-scheduled,
1289*5113495bSYour Name 		 * or to call napi_complete() and restart on a fresh interrupt
1290*5113495bSYour Name 		 */
1291*5113495bSYour Name 		normalized = budget;
1292*5113495bSYour Name 	}
1293*5113495bSYour Name 
1294*5113495bSYour Name 	hif_record_ce_desc_event(hif, NAPI_ID2PIPE(napi_info->id),
1295*5113495bSYour Name 				 NAPI_POLL_EXIT, NULL, NULL, normalized, 0);
1296*5113495bSYour Name 
1297*5113495bSYour Name 	hif_napi_fill_poll_time_histogram(napi_info);
1298*5113495bSYour Name 
1299*5113495bSYour Name 	NAPI_DEBUG("%s <--[normalized=%d]", __func__, normalized);
1300*5113495bSYour Name 	return normalized;
1301*5113495bSYour Name out:
1302*5113495bSYour Name 	return rc;
1303*5113495bSYour Name }
1304*5113495bSYour Name qdf_export_symbol(hif_napi_poll);
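
/*
 * Illustrative sketch (not part of this file) of the HDD-side wrapper
 * described in the comment above: the kernel invokes a standard NAPI
 * poll callback, which simply forwards to hif_napi_poll(). The wrapper
 * name and the way the HIF handle is obtained here are hypothetical.
 *
 *	static int hdd_napi_poll_sketch(struct napi_struct *napi, int budget)
 *	{
 *		struct hif_opaque_softc *hif_ctx = get_hif_handle_somehow();
 *
 *		return hif_napi_poll(hif_ctx, napi, budget);
 *	}
 */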
1305*5113495bSYour Name 
1306*5113495bSYour Name void hif_update_napi_max_poll_time(struct CE_state *ce_state,
1307*5113495bSYour Name 				   int ce_id,
1308*5113495bSYour Name 				   int cpu_id)
1309*5113495bSYour Name {
1310*5113495bSYour Name 	struct hif_softc *hif;
1311*5113495bSYour Name 	struct qca_napi_info *napi_info;
1312*5113495bSYour Name 	unsigned long long napi_poll_time = qdf_time_sched_clock() -
1313*5113495bSYour Name 					ce_state->ce_service_start_time;
1314*5113495bSYour Name 
1315*5113495bSYour Name 	hif = ce_state->scn;
1316*5113495bSYour Name 	napi_info = hif->napi_data.napis[ce_id];
1317*5113495bSYour Name 	if (napi_poll_time >
1318*5113495bSYour Name 			napi_info->stats[cpu_id].napi_max_poll_time)
1319*5113495bSYour Name 		napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time;
1320*5113495bSYour Name }
1321*5113495bSYour Name qdf_export_symbol(hif_update_napi_max_poll_time);
1322*5113495bSYour Name 
1323*5113495bSYour Name #ifdef HIF_IRQ_AFFINITY
1324*5113495bSYour Name /**
1325*5113495bSYour Name  * hif_napi_update_yield_stats() - update NAPI yield related stats
1326*5113495bSYour Name  * @ce_state: CE state structure
1327*5113495bSYour Name  * @time_limit_reached: indicates whether the time limit was reached
1328*5113495bSYour Name  * @rxpkt_thresh_reached: indicates whether rx packet threshold was reached
1329*5113495bSYour Name  *
1330*5113495bSYour Name  * Return: None
1331*5113495bSYour Name  */
1332*5113495bSYour Name void hif_napi_update_yield_stats(struct CE_state *ce_state,
1333*5113495bSYour Name 				 bool time_limit_reached,
1334*5113495bSYour Name 				 bool rxpkt_thresh_reached)
1335*5113495bSYour Name {
1336*5113495bSYour Name 	struct hif_softc *hif;
1337*5113495bSYour Name 	struct qca_napi_data *napi_data = NULL;
1338*5113495bSYour Name 	int ce_id = 0;
1339*5113495bSYour Name 	int cpu_id = 0;
1340*5113495bSYour Name 
1341*5113495bSYour Name 	if (unlikely(!ce_state)) {
1342*5113495bSYour Name 		QDF_ASSERT(ce_state);
1343*5113495bSYour Name 		return;
1344*5113495bSYour Name 	}
1345*5113495bSYour Name 
1346*5113495bSYour Name 	hif = ce_state->scn;
1347*5113495bSYour Name 
1348*5113495bSYour Name 	if (unlikely(!hif)) {
1349*5113495bSYour Name 		QDF_ASSERT(hif);
1350*5113495bSYour Name 		return;
1351*5113495bSYour Name 	}
1352*5113495bSYour Name 	napi_data = &(hif->napi_data);
1353*5113495bSYour Name 	if (unlikely(!napi_data)) {
1354*5113495bSYour Name 		QDF_ASSERT(napi_data);
1355*5113495bSYour Name 		return;
1356*5113495bSYour Name 	}
1357*5113495bSYour Name 
1358*5113495bSYour Name 	ce_id = ce_state->id;
1359*5113495bSYour Name 	cpu_id = qdf_get_cpu();
1360*5113495bSYour Name 
1361*5113495bSYour Name 	if (unlikely(!napi_data->napis[ce_id]))
1362*5113495bSYour Name 		return;
1364*5113495bSYour Name 
1365*5113495bSYour Name 	if (time_limit_reached)
1366*5113495bSYour Name 		napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
1367*5113495bSYour Name 	else
1368*5113495bSYour Name 		napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;
1369*5113495bSYour Name 
1370*5113495bSYour Name 	hif_update_napi_max_poll_time(ce_state, ce_id,
1371*5113495bSYour Name 				      cpu_id);
1372*5113495bSYour Name }
1373*5113495bSYour Name 
1374*5113495bSYour Name /**
1375*5113495bSYour Name  * hif_napi_stats() - display NAPI CPU statistics
1376*5113495bSYour Name  * @napid: pointer to qca_napi_data
1377*5113495bSYour Name  *
1378*5113495bSYour Name  * Description:
1379*5113495bSYour Name  *    Prints the various CPU cores on which the NAPI instances /CEs interrupts
1380*5113495bSYour Name  *    are being executed. Can be called from outside NAPI layer.
1381*5113495bSYour Name  *
1382*5113495bSYour Name  * Return: None
1383*5113495bSYour Name  */
1384*5113495bSYour Name void hif_napi_stats(struct qca_napi_data *napid)
1385*5113495bSYour Name {
1386*5113495bSYour Name 	int i;
1387*5113495bSYour Name 	struct qca_napi_cpu *cpu;
1388*5113495bSYour Name 
1389*5113495bSYour Name 	if (!napid) {
1390*5113495bSYour Name 		qdf_debug("%s: napid struct is null", __func__);
1391*5113495bSYour Name 		return;
1392*5113495bSYour Name 	}
1393*5113495bSYour Name 
1394*5113495bSYour Name 	cpu = napid->napi_cpu;
1395*5113495bSYour Name 	qdf_debug("NAPI CPU TABLE");
1396*5113495bSYour Name 	qdf_debug("lilclhead=%d, bigclhead=%d",
1397*5113495bSYour Name 		  napid->lilcl_head, napid->bigcl_head);
1398*5113495bSYour Name 	for (i = 0; i < NR_CPUS; i++) {
1399*5113495bSYour Name 		qdf_debug("CPU[%02d]: state:%d crid=%02d clid=%02d crmk:0x%0lx thmk:0x%0lx frq:%d napi = 0x%08x lnk:%d",
1400*5113495bSYour Name 			  i,
1401*5113495bSYour Name 			  cpu[i].state, cpu[i].core_id, cpu[i].cluster_id,
1402*5113495bSYour Name 			  cpu[i].core_mask.bits[0],
1403*5113495bSYour Name 			  cpu[i].thread_mask.bits[0],
1404*5113495bSYour Name 			  cpu[i].max_freq, cpu[i].napis,
1405*5113495bSYour Name 			  cpu[i].cluster_nxt);
1406*5113495bSYour Name 	}
1407*5113495bSYour Name }
1408*5113495bSYour Name 
1409*5113495bSYour Name #ifdef FEATURE_NAPI_DEBUG
1410*5113495bSYour Name /*
1411*5113495bSYour Name  * Local functions
1412*5113495bSYour Name  * - no argument checks, all internal/trusted callers
1413*5113495bSYour Name  */
1414*5113495bSYour Name static void hnc_dump_cpus(struct qca_napi_data *napid)
1415*5113495bSYour Name {
1416*5113495bSYour Name 	hif_napi_stats(napid);
1417*5113495bSYour Name }
1418*5113495bSYour Name #else
1419*5113495bSYour Name static void hnc_dump_cpus(struct qca_napi_data *napid) { /* no-op */ }
1420*5113495bSYour Name #endif /* FEATURE_NAPI_DEBUG */
1421*5113495bSYour Name 
1422*5113495bSYour Name #define HNC_MIN_CLUSTER 0
1423*5113495bSYour Name #define HNC_MAX_CLUSTER 1
1424*5113495bSYour Name 
1425*5113495bSYour Name /**
1426*5113495bSYour Name  * hnc_link_clusters() - partitions the cpu table into clusters
1427*5113495bSYour Name  * @napid: pointer to NAPI data
1428*5113495bSYour Name  *
1429*5113495bSYour Name  * Takes in a CPU topology table and builds two linked lists
1430*5113495bSYour Name  * (big cluster cores, list-head at bigcl_head, and little cluster
1431*5113495bSYour Name  * cores, list-head at lilcl_head) out of it.
1432*5113495bSYour Name  *
1433*5113495bSYour Name  * If there is more than one cluster:
1434*5113495bSYour Name  * - bigcl_head and lilcl_head will be different,
1435*5113495bSYour Name  * - the cluster with the highest cpufreq is considered the "big" cluster.
1436*5113495bSYour Name  *   If more than one cluster shares the highest frequency, the *last* such
1437*5113495bSYour Name  *   cluster is designated the "big cluster"
1438*5113495bSYour Name  * - the cluster with the lowest cpufreq is considered the "li'l" cluster.
1439*5113495bSYour Name  *   If more than one cluster shares the lowest cpufreq, the *first* such
1440*5113495bSYour Name  *   cluster is designated the "little cluster"
1441*5113495bSYour Name  * - We only support up to 32 clusters
1442*5113495bSYour Name  * Return: 0 : OK
1443*5113495bSYour Name  *         !0: error (at least one of lil/big clusters could not be found)
1444*5113495bSYour Name  */
1445*5113495bSYour Name static int hnc_link_clusters(struct qca_napi_data *napid)
1446*5113495bSYour Name {
1447*5113495bSYour Name 	int rc = 0;
1448*5113495bSYour Name 
1449*5113495bSYour Name 	int i;
1450*5113495bSYour Name 	int it = 0;
1451*5113495bSYour Name 	uint32_t cl_done = 0x0;
1452*5113495bSYour Name 	int cl, curcl, curclhead = 0;
1453*5113495bSYour Name 	int more;
1454*5113495bSYour Name 	unsigned int lilfrq = INT_MAX;
1455*5113495bSYour Name 	unsigned int bigfrq = 0;
1456*5113495bSYour Name 	unsigned int clfrq = 0;
1457*5113495bSYour Name 	int prev = 0;
1458*5113495bSYour Name 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1459*5113495bSYour Name 
1460*5113495bSYour Name 	napid->lilcl_head = napid->bigcl_head = -1;
1461*5113495bSYour Name 
1462*5113495bSYour Name 	do {
1463*5113495bSYour Name 		more = 0;
1464*5113495bSYour Name 		it++; curcl = -1;
1465*5113495bSYour Name 		for (i = 0; i < NR_CPUS; i++) {
1466*5113495bSYour Name 			cl = cpus[i].cluster_id;
1467*5113495bSYour Name 			NAPI_DEBUG("Processing cpu[%d], cluster=%d\n",
1468*5113495bSYour Name 				   i, cl);
1469*5113495bSYour Name 			if ((cl < HNC_MIN_CLUSTER) || (cl > HNC_MAX_CLUSTER)) {
1470*5113495bSYour Name 				NAPI_DEBUG("Bad cluster (%d). SKIPPED\n", cl);
1471*5113495bSYour Name 				/* continue if ASSERTs are disabled */
1472*5113495bSYour Name 				continue;
1473*5113495bSYour Name 			}
1474*5113495bSYour Name 			if (cpumask_weight(&(cpus[i].core_mask)) == 0) {
1475*5113495bSYour Name 				NAPI_DEBUG("Core mask 0. SKIPPED\n");
1476*5113495bSYour Name 				continue;
1477*5113495bSYour Name 			}
1478*5113495bSYour Name 			if (cl_done & (0x01 << cl)) {
1479*5113495bSYour Name 				NAPI_DEBUG("Cluster already processed. SKIPPED\n");
1480*5113495bSYour Name 				continue;
1481*5113495bSYour Name 			} else {
1482*5113495bSYour Name 				if (more == 0) {
1483*5113495bSYour Name 					more = 1;
1484*5113495bSYour Name 					curcl = cl;
1485*5113495bSYour Name 					curclhead = i; /* row */
1486*5113495bSYour Name 					clfrq = cpus[i].max_freq;
1487*5113495bSYour Name 					prev = -1;
1488*5113495bSYour Name 				}
1489*5113495bSYour Name 				if ((curcl >= 0) && (curcl != cl)) {
1490*5113495bSYour Name 					NAPI_DEBUG("Entry cl(%d) != curcl(%d). SKIPPED\n",
1491*5113495bSYour Name 						   cl, curcl);
1492*5113495bSYour Name 					continue;
1493*5113495bSYour Name 				}
1494*5113495bSYour Name 				if (cpus[i].max_freq != clfrq)
1495*5113495bSYour Name 					NAPI_DEBUG("WARN: frq(%d)!=clfrq(%d)\n",
1496*5113495bSYour Name 						   cpus[i].max_freq, clfrq);
1497*5113495bSYour Name 				if (clfrq >= bigfrq) {
1498*5113495bSYour Name 					bigfrq = clfrq;
1499*5113495bSYour Name 					napid->bigcl_head  = curclhead;
1500*5113495bSYour Name 					NAPI_DEBUG("bigcl=%d\n", curclhead);
1501*5113495bSYour Name 				}
1502*5113495bSYour Name 				if (clfrq < lilfrq) {
1503*5113495bSYour Name 					lilfrq = clfrq;
1504*5113495bSYour Name 					napid->lilcl_head = curclhead;
1505*5113495bSYour Name 					NAPI_DEBUG("lilcl=%d\n", curclhead);
1506*5113495bSYour Name 				}
1507*5113495bSYour Name 				if (prev != -1)
1508*5113495bSYour Name 					cpus[prev].cluster_nxt = i;
1509*5113495bSYour Name 
1510*5113495bSYour Name 				prev = i;
1511*5113495bSYour Name 			}
1512*5113495bSYour Name 		}
1513*5113495bSYour Name 		if (curcl >= 0)
1514*5113495bSYour Name 			cl_done |= (0x01 << curcl);
1515*5113495bSYour Name 
1516*5113495bSYour Name 	} while (more);
1517*5113495bSYour Name 
1518*5113495bSYour Name 	if (qdf_unlikely((napid->lilcl_head < 0) && (napid->bigcl_head < 0)))
1519*5113495bSYour Name 		rc = -EFAULT;
1520*5113495bSYour Name 
1521*5113495bSYour Name 	hnc_dump_cpus(napid); /* if NAPI_DEBUG */
1522*5113495bSYour Name 	return rc;
1523*5113495bSYour Name }
1524*5113495bSYour Name #undef HNC_MIN_CLUSTER
1525*5113495bSYour Name #undef HNC_MAX_CLUSTER
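
/*
 * Worked example (hypothetical 4+4 big.LITTLE layout): if cpus 0-3 form
 * cluster 0 at 1.8 GHz and cpus 4-7 form cluster 1 at 2.4 GHz, the loop
 * above links 0->1->2->3 and 4->5->6->7 through cluster_nxt, and ends
 * with lilcl_head = 0 (lowest max_freq) and bigcl_head = 4 (highest).
 */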
1526*5113495bSYour Name 
1527*5113495bSYour Name /*
1528*5113495bSYour Name  * hotplug function group
1529*5113495bSYour Name  */
1530*5113495bSYour Name 
1531*5113495bSYour Name /**
1532*5113495bSYour Name  * hnc_cpu_online_cb() - handles CPU hotplug "up" events
1533*5113495bSYour Name  * @context: the associated HIF context
1534*5113495bSYour Name  * @cpu: the CPU Id of the CPU the event happened on
1535*5113495bSYour Name  *
1536*5113495bSYour Name  * Return: None
1537*5113495bSYour Name  */
1538*5113495bSYour Name static void hnc_cpu_online_cb(void *context, uint32_t cpu)
1539*5113495bSYour Name {
1540*5113495bSYour Name 	struct hif_softc *hif = context;
1541*5113495bSYour Name 	struct qca_napi_data *napid = &hif->napi_data;
1542*5113495bSYour Name 
1543*5113495bSYour Name 	if (cpu >= NR_CPUS)
1544*5113495bSYour Name 		return;
1545*5113495bSYour Name 
1546*5113495bSYour Name 	NAPI_DEBUG("-->%s(act=online, cpu=%u)", __func__, cpu);
1547*5113495bSYour Name 
1548*5113495bSYour Name 	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_UP;
1549*5113495bSYour Name 	NAPI_DEBUG("%s: CPU %u marked %d",
1550*5113495bSYour Name 		   __func__, cpu, napid->napi_cpu[cpu].state);
1551*5113495bSYour Name 
1552*5113495bSYour Name 	NAPI_DEBUG("<--%s", __func__);
1553*5113495bSYour Name }
1554*5113495bSYour Name 
1555*5113495bSYour Name /**
1556*5113495bSYour Name  * hnc_cpu_before_offline_cb() - handles CPU hotplug "prepare down" events
1557*5113495bSYour Name  * @context: the associated HIF context
1558*5113495bSYour Name  * @cpu: the CPU Id of the CPU the event happened on
1559*5113495bSYour Name  *
1560*5113495bSYour Name  * On transition to offline, we act on PREP events, because we may need to move
1561*5113495bSYour Name  * the irqs/NAPIs to another CPU before it is actually off-lined.
1562*5113495bSYour Name  *
1563*5113495bSYour Name  * Return: None
1564*5113495bSYour Name  */
1565*5113495bSYour Name static void hnc_cpu_before_offline_cb(void *context, uint32_t cpu)
1566*5113495bSYour Name {
1567*5113495bSYour Name 	struct hif_softc *hif = context;
1568*5113495bSYour Name 	struct qca_napi_data *napid = &hif->napi_data;
1569*5113495bSYour Name 
1570*5113495bSYour Name 	if (cpu >= NR_CPUS)
1571*5113495bSYour Name 		return;
1572*5113495bSYour Name 
1573*5113495bSYour Name 	NAPI_DEBUG("-->%s(act=before_offline, cpu=%u)", __func__, cpu);
1574*5113495bSYour Name 
1575*5113495bSYour Name 	napid->napi_cpu[cpu].state = QCA_NAPI_CPU_DOWN;
1576*5113495bSYour Name 
1577*5113495bSYour Name 	NAPI_DEBUG("%s: CPU %u marked %d; updating affinity",
1578*5113495bSYour Name 		   __func__, cpu, napid->napi_cpu[cpu].state);
1579*5113495bSYour Name 
1580*5113495bSYour Name 	/*
1581*5113495bSYour Name 	 * We need to move any NAPIs on this CPU out.
1582*5113495bSYour Name 	 * If we are in LO throughput mode, then this only matters
1583*5113495bSYour Name 	 * if the CPU is the designated low CPU.
1584*5113495bSYour Name 	 */
1585*5113495bSYour Name 	hif_napi_event(GET_HIF_OPAQUE_HDL(hif),
1586*5113495bSYour Name 		       NAPI_EVT_CPU_STATE,
1587*5113495bSYour Name 		       (void *)
1588*5113495bSYour Name 		       ((size_t)cpu << 16 | napid->napi_cpu[cpu].state));
1589*5113495bSYour Name 
1590*5113495bSYour Name 	NAPI_DEBUG("<--%s", __func__);
1591*5113495bSYour Name }
1592*5113495bSYour Name 
1593*5113495bSYour Name static int hnc_hotplug_register(struct hif_softc *hif_sc)
1594*5113495bSYour Name {
1595*5113495bSYour Name 	QDF_STATUS status;
1596*5113495bSYour Name 
1597*5113495bSYour Name 	NAPI_DEBUG("-->%s", __func__);
1598*5113495bSYour Name 
1599*5113495bSYour Name 	status = qdf_cpuhp_register(&hif_sc->napi_data.cpuhp_handler,
1600*5113495bSYour Name 				    hif_sc,
1601*5113495bSYour Name 				    hnc_cpu_online_cb,
1602*5113495bSYour Name 				    hnc_cpu_before_offline_cb);
1603*5113495bSYour Name 
1604*5113495bSYour Name 	NAPI_DEBUG("<--%s [%d]", __func__, status);
1605*5113495bSYour Name 
1606*5113495bSYour Name 	return qdf_status_to_os_return(status);
1607*5113495bSYour Name }
1608*5113495bSYour Name 
1609*5113495bSYour Name static void hnc_hotplug_unregister(struct hif_softc *hif_sc)
1610*5113495bSYour Name {
1611*5113495bSYour Name 	NAPI_DEBUG("-->%s", __func__);
1612*5113495bSYour Name 
1613*5113495bSYour Name 	if (hif_sc->napi_data.cpuhp_handler)
1614*5113495bSYour Name 		qdf_cpuhp_unregister(&hif_sc->napi_data.cpuhp_handler);
1615*5113495bSYour Name 
1616*5113495bSYour Name 	NAPI_DEBUG("<--%s", __func__);
1617*5113495bSYour Name }
1618*5113495bSYour Name 
1619*5113495bSYour Name /**
1620*5113495bSYour Name  * hnc_tput_hook() - installs a callback in the throughput detector
1621*5113495bSYour Name  * @install: non-zero => install; 0 => uninstall
1622*5113495bSYour Name  *
1623*5113495bSYour Name  * Installs a callback to be called when the wifi driver throughput (tx+rx)
1624*5113495bSYour Name  * crosses a threshold. Currently, we use the same criterion as
1625*5113495bSYour Name  * TCP ack suppression (500 packets/100 ms by default).
1626*5113495bSYour Name  *
1627*5113495bSYour Name  * Return: 0 : success
1628*5113495bSYour Name  *         <0: failure
1629*5113495bSYour Name  */
1630*5113495bSYour Name 
1631*5113495bSYour Name static int hnc_tput_hook(int install)
1632*5113495bSYour Name {
1633*5113495bSYour Name 	int rc = 0;
1634*5113495bSYour Name 
1635*5113495bSYour Name 	/*
1636*5113495bSYour Name 	 * Nothing to do until the bw_calculation code accepts registrations;
1637*5113495bSYour Name 	 * for now the hook is hardcoded in wlan_hdd_main.c::
1638*5113495bSYour Name 	 * hdd_bus_bw_compute_cbk() via hdd_napi_throughput_policy(...)
1639*5113495bSYour Name 	 */
1640*5113495bSYour Name 	return rc;
1641*5113495bSYour Name }
1642*5113495bSYour Name 
1643*5113495bSYour Name /*
1644*5113495bSYour Name  * Implementation of hif_napi_cpu API
1645*5113495bSYour Name  */
1646*5113495bSYour Name 
1647*5113495bSYour Name #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
1648*5113495bSYour Name static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1649*5113495bSYour Name {
1650*5113495bSYour Name 	cpumask_copy(&(cpus[i].thread_mask),
1651*5113495bSYour Name 			     topology_sibling_cpumask(i));
1652*5113495bSYour Name }
1653*5113495bSYour Name #else
1654*5113495bSYour Name static inline void record_sibling_cpumask(struct qca_napi_cpu *cpus, int i)
1655*5113495bSYour Name {
1656*5113495bSYour Name }
1657*5113495bSYour Name #endif
1658*5113495bSYour Name 
1659*5113495bSYour Name 
1660*5113495bSYour Name /**
1661*5113495bSYour Name  * hif_napi_cpu_init() - initialization of irq affinity block
1662*5113495bSYour Name  * @hif: HIF context
1663*5113495bSYour Name  *
1664*5113495bSYour Name  * called by hif_napi_create, after the first instance is created
1665*5113495bSYour Name  * - builds the napi_cpu table from the cpu topology
1666*5113495bSYour Name  * - links cores of the same clusters together
1667*5113495bSYour Name  * - installs hot-plug notifier
1668*5113495bSYour Name  * - installs throughput trigger notifier (when such mechanism exists)
1669*5113495bSYour Name  *
1670*5113495bSYour Name  * Return: 0: OK
1671*5113495bSYour Name  *         <0: error code
1672*5113495bSYour Name  */
1673*5113495bSYour Name int hif_napi_cpu_init(struct hif_opaque_softc *hif)
1674*5113495bSYour Name {
1675*5113495bSYour Name 	int rc = 0;
1676*5113495bSYour Name 	int i;
1677*5113495bSYour Name 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1678*5113495bSYour Name 	struct qca_napi_cpu *cpus = napid->napi_cpu;
1679*5113495bSYour Name 
1680*5113495bSYour Name 	NAPI_DEBUG("--> ");
1681*5113495bSYour Name 
1682*5113495bSYour Name 	if (cpus[0].state != QCA_NAPI_CPU_UNINITIALIZED) {
1683*5113495bSYour Name 		NAPI_DEBUG("NAPI RSS table already initialized.\n");
1684*5113495bSYour Name 		rc = -EALREADY;
1685*5113495bSYour Name 		goto lab_rss_init;
1686*5113495bSYour Name 	}
1687*5113495bSYour Name 
1688*5113495bSYour Name 	/* build CPU topology table */
1689*5113495bSYour Name 	for_each_possible_cpu(i) {
1690*5113495bSYour Name 		cpus[i].state       = ((cpumask_test_cpu(i, cpu_online_mask)
1691*5113495bSYour Name 					? QCA_NAPI_CPU_UP
1692*5113495bSYour Name 					: QCA_NAPI_CPU_DOWN));
1693*5113495bSYour Name 		cpus[i].core_id     = topology_core_id(i);
1694*5113495bSYour Name 		cpus[i].cluster_id  = topology_physical_package_id(i);
1695*5113495bSYour Name 		cpumask_copy(&(cpus[i].core_mask),
1696*5113495bSYour Name 			     topology_core_cpumask(i));
1697*5113495bSYour Name 		record_sibling_cpumask(cpus, i);
1698*5113495bSYour Name 		cpus[i].max_freq    = cpufreq_quick_get_max(i);
1699*5113495bSYour Name 		cpus[i].napis       = 0x0;
1700*5113495bSYour Name 		cpus[i].cluster_nxt = -1; /* invalid */
1701*5113495bSYour Name 	}
1702*5113495bSYour Name 
1703*5113495bSYour Name 	/* link clusters together */
1704*5113495bSYour Name 	rc = hnc_link_clusters(napid);
1705*5113495bSYour Name 	if (0 != rc)
1706*5113495bSYour Name 		goto lab_err_topology;
1707*5113495bSYour Name 
1708*5113495bSYour Name 	/* install hotplug notifier */
1709*5113495bSYour Name 	rc = hnc_hotplug_register(HIF_GET_SOFTC(hif));
1710*5113495bSYour Name 	if (0 != rc)
1711*5113495bSYour Name 		goto lab_err_hotplug;
1712*5113495bSYour Name 
1713*5113495bSYour Name 	/* install throughput notifier */
1714*5113495bSYour Name 	rc = hnc_tput_hook(1);
1715*5113495bSYour Name 	if (0 == rc)
1716*5113495bSYour Name 		goto lab_rss_init;
1717*5113495bSYour Name 
1718*5113495bSYour Name lab_err_hotplug:
1719*5113495bSYour Name 	hnc_tput_hook(0);
1720*5113495bSYour Name 	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
1721*5113495bSYour Name lab_err_topology:
1722*5113495bSYour Name 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1723*5113495bSYour Name lab_rss_init:
1724*5113495bSYour Name 	NAPI_DEBUG("<-- [rc=%d]", rc);
1725*5113495bSYour Name 	return rc;
1726*5113495bSYour Name }
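
/*
 * For illustration, on a hypothetical 8-core system the loop above fills
 * one qca_napi_cpu row per possible cpu, e.g. for an online cpu 5 in the
 * big cluster: state = QCA_NAPI_CPU_UP, core_id/cluster_id taken from the
 * topology helpers, max_freq from cpufreq_quick_get_max(5), napis = 0,
 * and cluster_nxt = -1 until hnc_link_clusters() chains the rows.
 */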
1727*5113495bSYour Name 
1728*5113495bSYour Name /**
1729*5113495bSYour Name  * hif_napi_cpu_deinit() - clean-up of irq affinity block
1730*5113495bSYour Name  * @hif: HIF context
1731*5113495bSYour Name  *
1732*5113495bSYour Name  * called by hif_napi_destroy, when the last instance is removed
1733*5113495bSYour Name  * - uninstalls throughput and hotplug notifiers
1734*5113495bSYour Name  * - clears cpu topology table
1735*5113495bSYour Name  * Return: 0: OK
1736*5113495bSYour Name  */
1737*5113495bSYour Name int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
1738*5113495bSYour Name {
1739*5113495bSYour Name 	int rc = 0;
1740*5113495bSYour Name 	struct qca_napi_data *napid = &HIF_GET_SOFTC(hif)->napi_data;
1741*5113495bSYour Name 
1742*5113495bSYour Name 	NAPI_DEBUG("-->%s(...)", __func__);
1743*5113495bSYour Name 
1744*5113495bSYour Name 	/* uninstall tput notifier */
1745*5113495bSYour Name 	rc = hnc_tput_hook(0);
1746*5113495bSYour Name 
1747*5113495bSYour Name 	/* uninstall hotplug notifier */
1748*5113495bSYour Name 	hnc_hotplug_unregister(HIF_GET_SOFTC(hif));
1749*5113495bSYour Name 
1750*5113495bSYour Name 	/* clear the topology table */
1751*5113495bSYour Name 	memset(napid->napi_cpu, 0, sizeof(struct qca_napi_cpu) * NR_CPUS);
1752*5113495bSYour Name 
1753*5113495bSYour Name 	NAPI_DEBUG("<--%s[rc=%d]", __func__, rc);
1754*5113495bSYour Name 
1755*5113495bSYour Name 	return rc;
1756*5113495bSYour Name }
1757*5113495bSYour Name 
1758*5113495bSYour Name /**
1759*5113495bSYour Name  * hncm_migrate_to() - migrates a NAPI to a CPU
1760*5113495bSYour Name  * @napid: pointer to NAPI block
1761*5113495bSYour Name  * @napi_ce: CE_id of the NAPI instance
1762*5113495bSYour Name  * @didx: index in the CPU topology table for the CPU to migrate to
1763*5113495bSYour Name  *
1764*5113495bSYour Name  * Migrates NAPI (identified by the CE_id) to the destination core
1765*5113495bSYour Name  * Updates the napi_map of the destination entry
1766*5113495bSYour Name  *
1767*5113495bSYour Name  * Return:
1768*5113495bSYour Name  *  =0 : success
1769*5113495bSYour Name  *  <0 : error
1770*5113495bSYour Name  */
1771*5113495bSYour Name static int hncm_migrate_to(struct qca_napi_data *napid,
1772*5113495bSYour Name 			   int                   napi_ce,
1773*5113495bSYour Name 			   int                   didx)
1774*5113495bSYour Name {
1775*5113495bSYour Name 	int rc = 0;
1776*5113495bSYour Name 	QDF_STATUS status;
1777*5113495bSYour Name 
1778*5113495bSYour Name 	NAPI_DEBUG("-->%s(napi_cd=%d, didx=%d)", __func__, napi_ce, didx);
1779*5113495bSYour Name 
1780*5113495bSYour Name 	if (!napid->napis[napi_ce])
1781*5113495bSYour Name 		return -EINVAL;
1782*5113495bSYour Name 
1783*5113495bSYour Name 	napid->napis[napi_ce]->cpumask.bits[0] = (1 << didx);
1784*5113495bSYour Name 
1785*5113495bSYour Name 	qdf_dev_modify_irq_status(napid->napis[napi_ce]->irq,
1786*5113495bSYour Name 				  QDF_IRQ_NO_BALANCING, 0);
1787*5113495bSYour Name 	status = qdf_dev_set_irq_affinity(napid->napis[napi_ce]->irq,
1788*5113495bSYour Name 					  (struct qdf_cpu_mask *)
1789*5113495bSYour Name 					  &napid->napis[napi_ce]->cpumask);
1790*5113495bSYour Name 	rc = qdf_status_to_os_return(status);
1791*5113495bSYour Name 
1792*5113495bSYour Name 	/* unmark the napis bitmap in the cpu table */
1793*5113495bSYour Name 	napid->napi_cpu[napid->napis[napi_ce]->cpu].napis &= ~(0x01 << napi_ce);
1794*5113495bSYour Name 	/* mark the napis bitmap for the new designated cpu */
1795*5113495bSYour Name 	napid->napi_cpu[didx].napis |= (0x01 << napi_ce);
1796*5113495bSYour Name 	napid->napis[napi_ce]->cpu = didx;
1797*5113495bSYour Name 
1798*5113495bSYour Name 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
1799*5113495bSYour Name 	return rc;
1800*5113495bSYour Name }
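
/*
 * For example, migrating the NAPI of CE 2 from cpu 0 to table index 4
 * clears bit 2 in napi_cpu[0].napis, sets bit 2 in napi_cpu[4].napis,
 * and pins the irq to cpumask 0x10 (i.e. 1 << 4).
 */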
1801*5113495bSYour Name /**
1802*5113495bSYour Name  * hncm_dest_cpu() - finds a destination CPU for NAPI
1803*5113495bSYour Name  * @napid: pointer to NAPI block
1804*5113495bSYour Name  * @act: RELOCATE | COLLAPSE | DISPERSE
1805*5113495bSYour Name  *
1806*5113495bSYour Name  * Finds the designated destination for the next IRQ.
1807*5113495bSYour Name  * RELOCATE: translated to either COLLAPSE or DISPERSE based
1808*5113495bSYour Name  *           on napid->napi_mode (throughput state)
1809*5113495bSYour Name  * COLLAPSE: All have the same destination: the first online CPU in lilcl
1810*5113495bSYour Name  * DISPERSE: One of the CPU in bigcl, which has the smallest number of
1811*5113495bSYour Name  *           NAPIs on it
1812*5113495bSYour Name  *
1813*5113495bSYour Name  * Return: >=0 : index in the cpu topology table
1814*5113495bSYour Name  *       : < 0 : error
1815*5113495bSYour Name  */
1816*5113495bSYour Name static int hncm_dest_cpu(struct qca_napi_data *napid, int act)
1817*5113495bSYour Name {
1818*5113495bSYour Name 	int destidx = -1;
1819*5113495bSYour Name 	int head, i;
1820*5113495bSYour Name 
1821*5113495bSYour Name 	NAPI_DEBUG("-->%s(act=%d)", __func__, act);
1822*5113495bSYour Name 	if (act == HNC_ACT_RELOCATE) {
1823*5113495bSYour Name 		if (napid->napi_mode == QCA_NAPI_TPUT_LO)
1824*5113495bSYour Name 			act = HNC_ACT_COLLAPSE;
1825*5113495bSYour Name 		else
1826*5113495bSYour Name 			act = HNC_ACT_DISPERSE;
1827*5113495bSYour Name 		NAPI_DEBUG("%s: act changed from HNC_ACT_RELOCATE to %d",
1828*5113495bSYour Name 			   __func__, act);
1829*5113495bSYour Name 	}
1830*5113495bSYour Name 	if (act == HNC_ACT_COLLAPSE) {
1831*5113495bSYour Name 		head = i = napid->lilcl_head;
1832*5113495bSYour Name retry_collapse:
1833*5113495bSYour Name 		while (i >= 0) {
1834*5113495bSYour Name 			if (napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) {
1835*5113495bSYour Name 				destidx = i;
1836*5113495bSYour Name 				break;
1837*5113495bSYour Name 			}
1838*5113495bSYour Name 			i = napid->napi_cpu[i].cluster_nxt;
1839*5113495bSYour Name 		}
1840*5113495bSYour Name 		if ((destidx < 0) && (head == napid->lilcl_head)) {
1841*5113495bSYour Name 			NAPI_DEBUG("%s: COLLAPSE: no lilcl dest, try bigcl",
1842*5113495bSYour Name 				__func__);
1843*5113495bSYour Name 			head = i = napid->bigcl_head;
1844*5113495bSYour Name 			goto retry_collapse;
1845*5113495bSYour Name 		}
1846*5113495bSYour Name 	} else { /* HNC_ACT_DISPERSE */
1847*5113495bSYour Name 		int smallest = 99; /* sentinel: larger than max hweight32() (32) */
1848*5113495bSYour Name 		int smallidx = -1;
1849*5113495bSYour Name 
1850*5113495bSYour Name 		head = i = napid->bigcl_head;
1851*5113495bSYour Name retry_disperse:
1852*5113495bSYour Name 		while (i >= 0) {
1853*5113495bSYour Name 			if ((napid->napi_cpu[i].state == QCA_NAPI_CPU_UP) &&
1854*5113495bSYour Name 			    (hweight32(napid->napi_cpu[i].napis) <= smallest)) {
1855*5113495bSYour Name 				smallest = hweight32(napid->napi_cpu[i].napis);
1856*5113495bSYour Name 				smallidx = i;
1857*5113495bSYour Name 			}
1858*5113495bSYour Name 			i = napid->napi_cpu[i].cluster_nxt;
1859*5113495bSYour Name 		}
1860*5113495bSYour Name 		/* Check that the choice matches the user-specified CPU mask */
1861*5113495bSYour Name 		smallidx = (smallidx >= 0 && ((1 << smallidx) &
1862*5113495bSYour Name 			    napid->user_cpu_affin_mask)) ? smallidx : -1;
1863*5113495bSYour Name 
1864*5113495bSYour Name 		if ((smallidx < 0) && (head == napid->bigcl_head)) {
1865*5113495bSYour Name 			NAPI_DEBUG("%s: DISPERSE: no bigcl dest, try lilcl",
1866*5113495bSYour Name 				__func__);
1867*5113495bSYour Name 			head = i = napid->lilcl_head;
1868*5113495bSYour Name 			goto retry_disperse;
1869*5113495bSYour Name 		}
1870*5113495bSYour Name 		destidx = smallidx;
1871*5113495bSYour Name 	}
1872*5113495bSYour Name 	NAPI_DEBUG("<--%s[dest=%d]", __func__, destidx);
1873*5113495bSYour Name 	return destidx;
1874*5113495bSYour Name }
1875*5113495bSYour Name /**
1876*5113495bSYour Name  * hif_napi_cpu_migrate() - migrate IRQs away
1877*5113495bSYour Name  * @napid: pointer to NAPI block
1878*5113495bSYour Name  * @cpu: -1: all CPUs; <n>: the specific CPU
1879*5113495bSYour Name  * @action: COLLAPSE | DISPERSE
1880*5113495bSYour Name  *
1881*5113495bSYour Name  * Moves IRQs/NAPIs from specific or all CPUs (specified by @cpu) to eligible
1882*5113495bSYour Name  * cores. Eligible cores are:
1883*5113495bSYour Name  * act=COLLAPSE -> the first online core of the little cluster
1884*5113495bSYour Name  * act=DISPERSE -> separate cores of the big cluster, so that each core will
1885*5113495bSYour Name  *                 host a minimal number of NAPIs/IRQs (napid->napi_cpu[].napis)
1886*5113495bSYour Name  *
1887*5113495bSYour Name  * Note that this function is called with a spinlock acquired already.
1888*5113495bSYour Name  *
1889*5113495bSYour Name  * Return: =0: success
1890*5113495bSYour Name  *         <0: error
1891*5113495bSYour Name  */
1892*5113495bSYour Name 
1893*5113495bSYour Name int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action)
1894*5113495bSYour Name {
1895*5113495bSYour Name 	int      rc = 0;
1896*5113495bSYour Name 	struct qca_napi_cpu *cpup;
1897*5113495bSYour Name 	int      i, dind;
1898*5113495bSYour Name 	uint32_t napis;
1899*5113495bSYour Name 
1900*5113495bSYour Name 	NAPI_DEBUG("-->%s(.., cpu=%d, act=%d)",
1901*5113495bSYour Name 		   __func__, cpu, action);
1902*5113495bSYour Name 	/* the following is really: hif_napi_enabled() with less overhead */
1903*5113495bSYour Name 	if (napid->ce_map == 0) {
1904*5113495bSYour Name 		NAPI_DEBUG("%s: NAPI disabled. Not migrating.", __func__);
1905*5113495bSYour Name 		goto hncm_return;
1906*5113495bSYour Name 	}
1907*5113495bSYour Name 
1908*5113495bSYour Name 	cpup = napid->napi_cpu;
1909*5113495bSYour Name 
1910*5113495bSYour Name 	switch (action) {
1911*5113495bSYour Name 	case HNC_ACT_RELOCATE:
1912*5113495bSYour Name 	case HNC_ACT_DISPERSE:
1913*5113495bSYour Name 	case HNC_ACT_COLLAPSE: {
1914*5113495bSYour Name 		/* first find the src napi set */
1915*5113495bSYour Name 		if (cpu == HNC_ANY_CPU)
1916*5113495bSYour Name 			napis = napid->ce_map;
1917*5113495bSYour Name 		else
1918*5113495bSYour Name 			napis = cpup[cpu].napis;
1919*5113495bSYour Name 		/* then clear the napi bitmap on each CPU */
1920*5113495bSYour Name 		for (i = 0; i < NR_CPUS; i++)
1921*5113495bSYour Name 			cpup[i].napis = 0;
1922*5113495bSYour Name 		/* then for each of the NAPIs to disperse: */
1923*5113495bSYour Name 		for (i = 0; i < CE_COUNT_MAX; i++)
1924*5113495bSYour Name 			if (napis & (1 << i)) {
1925*5113495bSYour Name 				/* find a destination CPU */
1926*5113495bSYour Name 				dind = hncm_dest_cpu(napid, action);
1927*5113495bSYour Name 				if (dind >= 0) {
1928*5113495bSYour Name 					NAPI_DEBUG("Migrating NAPI ce%d to %d",
1929*5113495bSYour Name 						   i, dind);
1930*5113495bSYour Name 					rc = hncm_migrate_to(napid, i, dind);
1931*5113495bSYour Name 				} else {
1932*5113495bSYour Name 					NAPI_DEBUG("No dest for NAPI ce%d", i);
1933*5113495bSYour Name 					hnc_dump_cpus(napid);
1934*5113495bSYour Name 					rc = -1;
1935*5113495bSYour Name 				}
1936*5113495bSYour Name 			}
1937*5113495bSYour Name 		break;
1938*5113495bSYour Name 	}
1939*5113495bSYour Name 	default: {
1940*5113495bSYour Name 		NAPI_DEBUG("%s: bad action: %d\n", __func__, action);
1941*5113495bSYour Name 		QDF_BUG(0);
1942*5113495bSYour Name 		break;
1943*5113495bSYour Name 	}
1944*5113495bSYour Name 	} /* switch action */
1945*5113495bSYour Name 
1946*5113495bSYour Name hncm_return:
1947*5113495bSYour Name 	hnc_dump_cpus(napid);
1948*5113495bSYour Name 	return rc;
1949*5113495bSYour Name }
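
/*
 * Illustrative call sites (sketch only; this function expects the napi
 * spinlock to be held): a throughput-state handler might collapse all
 * NAPIs onto the little cluster in low-throughput mode and disperse
 * them across the big cluster in high-throughput mode:
 *
 *	hif_napi_cpu_migrate(napid, HNC_ANY_CPU, HNC_ACT_COLLAPSE);
 *	...
 *	hif_napi_cpu_migrate(napid, HNC_ANY_CPU, HNC_ACT_DISPERSE);
 *
 * The hotplug path would instead pass the specific cpu going down along
 * with HNC_ACT_RELOCATE and let napi_mode select the real action.
 */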
1950*5113495bSYour Name 
1951*5113495bSYour Name 
1952*5113495bSYour Name /**
1953*5113495bSYour Name  * hif_napi_dl_irq() - calls irq_modify_status to enable/disable denylisting
1954*5113495bSYour Name  * @napid: pointer to qca_napi_data structure
1955*5113495bSYour Name  * @dl_flag: denylist flag to enable/disable denylisting
1956*5113495bSYour Name  *
1957*5113495bSYour Name  * The function enables/disables denylisting for all the copy engine
1958*5113495bSYour Name  * interrupts on which NAPI is enabled.
1959*5113495bSYour Name  *
1960*5113495bSYour Name  * Return: None
1961*5113495bSYour Name  */
1962*5113495bSYour Name static inline void hif_napi_dl_irq(struct qca_napi_data *napid, bool dl_flag)
1963*5113495bSYour Name {
1964*5113495bSYour Name 	int i;
1965*5113495bSYour Name 	struct qca_napi_info *napii;
1966*5113495bSYour Name 
1967*5113495bSYour Name 	for (i = 0; i < CE_COUNT_MAX; i++) {
1968*5113495bSYour Name 		/* check if NAPI is enabled on the CE */
1969*5113495bSYour Name 		if (!(napid->ce_map & (0x01 << i)))
1970*5113495bSYour Name 			continue;
1971*5113495bSYour Name 
1972*5113495bSYour Name 		/* double-check that NAPI is allocated for the CE */
1973*5113495bSYour Name 		napii = napid->napis[i];
1974*5113495bSYour Name 		if (!napii)
1975*5113495bSYour Name 			continue;
1976*5113495bSYour Name 
1977*5113495bSYour Name 		if (dl_flag)
1978*5113495bSYour Name 			qdf_dev_modify_irq_status(napii->irq,
1979*5113495bSYour Name 						  0, QDF_IRQ_NO_BALANCING);
1980*5113495bSYour Name 		else
1981*5113495bSYour Name 			qdf_dev_modify_irq_status(napii->irq,
1982*5113495bSYour Name 						  QDF_IRQ_NO_BALANCING, 0);
1983*5113495bSYour Name 		hif_debug("dl_flag %d CE %d", dl_flag, i);
1984*5113495bSYour Name 	}
1985*5113495bSYour Name }
1986*5113495bSYour Name 
1987*5113495bSYour Name /**
1988*5113495bSYour Name  * hif_napi_cpu_denylist() - en(dis)ables denylisting for NAPI RX interrupts.
1989*5113495bSYour Name  * @napid: pointer to qca_napi_data structure
1990*5113495bSYour Name  * @op: denylist operation to perform
1991*5113495bSYour Name  *
1992*5113495bSYour Name  * The function enables/disables/queries denylisting for all CE RX
1993*5113495bSYour Name  * interrupts with NAPI enabled. Besides denylisting, it also enables/disables
1994*5113495bSYour Name  * core_ctl_set_boost.
1995*5113495bSYour Name  * Once denylisting is enabled, the interrupts will not be managed by the IRQ
1996*5113495bSYour Name  * balancer.
1997*5113495bSYour Name  *
1998*5113495bSYour Name  * Return: -EINVAL if IRQ denylisting and CORE_CTL_BOOST are not both enabled
1999*5113495bSYour Name  *         for DENYLIST_QUERY op - denylist refcount
2000*5113495bSYour Name  *         for DENYLIST_ON op    - return value from core_ctl_set_boost API
2001*5113495bSYour Name  *         for DENYLIST_OFF op   - return value from core_ctl_set_boost API
2002*5113495bSYour Name  */
2003*5113495bSYour Name int hif_napi_cpu_denylist(struct qca_napi_data *napid,
2004*5113495bSYour Name 			  enum qca_denylist_op op)
2005*5113495bSYour Name {
2006*5113495bSYour Name 	int rc = 0;
2007*5113495bSYour Name 	static int ref_count; /* = 0 by the compiler */
2008*5113495bSYour Name 	uint8_t flags = napid->flags;
2009*5113495bSYour Name 	bool dl_en = flags & QCA_NAPI_FEATURE_IRQ_BLACKLISTING;
2010*5113495bSYour Name 	bool ccb_en = flags & QCA_NAPI_FEATURE_CORE_CTL_BOOST;
2011*5113495bSYour Name 
2012*5113495bSYour Name 	NAPI_DEBUG("-->%s(%d %d)", __func__, flags, op);
2013*5113495bSYour Name 
2014*5113495bSYour Name 	if (!(dl_en && ccb_en)) {
2015*5113495bSYour Name 		rc = -EINVAL;
2016*5113495bSYour Name 		goto out;
2017*5113495bSYour Name 	}
2018*5113495bSYour Name 
2019*5113495bSYour Name 	switch (op) {
2020*5113495bSYour Name 	case DENYLIST_QUERY:
2021*5113495bSYour Name 		rc = ref_count;
2022*5113495bSYour Name 		break;
2023*5113495bSYour Name 	case DENYLIST_ON:
2024*5113495bSYour Name 		ref_count++;
2025*5113495bSYour Name 		rc = 0;
2026*5113495bSYour Name 		if (ref_count == 1) {
2027*5113495bSYour Name 			rc = hif_napi_core_ctl_set_boost(true);
2028*5113495bSYour Name 			NAPI_DEBUG("boost_on() returns %d - refcnt=%d",
2029*5113495bSYour Name 				rc, ref_count);
2030*5113495bSYour Name 			hif_napi_dl_irq(napid, true);
2031*5113495bSYour Name 		}
2032*5113495bSYour Name 		break;
2033*5113495bSYour Name 	case DENYLIST_OFF:
2034*5113495bSYour Name 		if (ref_count) {
2035*5113495bSYour Name 			ref_count--;
2036*5113495bSYour Name 			rc = 0;
2037*5113495bSYour Name 			if (ref_count == 0) {
2038*5113495bSYour Name 				rc = hif_napi_core_ctl_set_boost(false);
2039*5113495bSYour Name 				NAPI_DEBUG("boost_off() returns %d - refcnt=%d",
2040*5113495bSYour Name 					   rc, ref_count);
2041*5113495bSYour Name 				hif_napi_dl_irq(napid, false);
2042*5113495bSYour Name 			}
2043*5113495bSYour Name 		}
2044*5113495bSYour Name 		break;
2045*5113495bSYour Name 	default:
2046*5113495bSYour Name 		NAPI_DEBUG("Invalid denylist op: %d", op);
2047*5113495bSYour Name 		rc = -EINVAL;
2048*5113495bSYour Name 	} /* switch */
2049*5113495bSYour Name out:
2050*5113495bSYour Name 	NAPI_DEBUG("<--%s[%d]", __func__, rc);
2051*5113495bSYour Name 	return rc;
2052*5113495bSYour Name }
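
/*
 * Sketch of the intended pairing (illustrative only): callers bump and
 * drop the denylist refcount symmetrically, and only the 0->1 and 1->0
 * transitions actually touch core_ctl boost and the irq status bits:
 *
 *	hif_napi_cpu_denylist(napid, DENYLIST_ON);
 *	... latency-sensitive processing ...
 *	hif_napi_cpu_denylist(napid, DENYLIST_OFF);
 *
 * DENYLIST_QUERY returns the current refcount (> 0 means active).
 */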
2053*5113495bSYour Name 
2054*5113495bSYour Name static unsigned long napi_serialize_reqs;
2055*5113495bSYour Name /**
2056*5113495bSYour Name  * hif_napi_serialize() - [de-]serialize NAPI operations
2057*5113495bSYour Name  * @hif:   context
2058*5113495bSYour Name  * @is_on: 1: serialize, 0: deserialize
2059*5113495bSYour Name  *
2060*5113495bSYour Name  * hif_napi_serialize(hif, 1) can be called multiple times. It will perform the
2061*5113495bSYour Name  * following steps (see hif_napi_event for code):
2062*5113495bSYour Name  * - put irqs of all NAPI instances on the same CPU
2063*5113495bSYour Name  * - only for the first serialize call: denylist
2064*5113495bSYour Name  *
2065*5113495bSYour Name  * hif_napi_serialize(hif, 0):
2066*5113495bSYour Name  * - start a timer (multiple of BusBandwidthTimer -- default: 100 msec)
2067*5113495bSYour Name  * - at the end of the timer, check the current throughput state and
2068*5113495bSYour Name  *   implement it.
2069*5113495bSYour Name  */
2070*5113495bSYour Name int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
2071*5113495bSYour Name {
2072*5113495bSYour Name 	int rc = -EINVAL;
2073*5113495bSYour Name 
2074*5113495bSYour Name 	if (hif)
2075*5113495bSYour Name 		switch (is_on) {
2076*5113495bSYour Name 		case 0: { /* de-serialize */
2077*5113495bSYour Name 			rc = hif_napi_event(hif, NAPI_EVT_USR_NORMAL,
2078*5113495bSYour Name 					    (void *) 0);
2079*5113495bSYour Name 			napi_serialize_reqs = 0;
2080*5113495bSYour Name 			break;
2081*5113495bSYour Name 		} /* end de-serialize */
2082*5113495bSYour Name 		case 1: { /* serialize */
2083*5113495bSYour Name 			rc = hif_napi_event(hif, NAPI_EVT_USR_SERIAL,
2084*5113495bSYour Name 					    (void *)napi_serialize_reqs++);
2085*5113495bSYour Name 			break;
2086*5113495bSYour Name 		} /* end serialize */
2087*5113495bSYour Name 		default:
2088*5113495bSYour Name 			break; /* no-op */
2089*5113495bSYour Name 		} /* switch */
2090*5113495bSYour Name 	return rc;
2091*5113495bSYour Name }
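
/*
 * Illustrative usage (sketch only): a user-triggered serialize request
 * and its later release would look like:
 *
 *	hif_napi_serialize(hif_ctx, 1);	... pin all NAPIs to one CPU ...
 *	hif_napi_serialize(hif_ctx, 0);	... timer re-evaluates tput state ...
 *
 * Repeated is_on=1 calls are counted through napi_serialize_reqs, so
 * only the first one triggers the denylist step (see hif_napi_event()).
 */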
2092*5113495bSYour Name 
2093*5113495bSYour Name #endif /* ifdef HIF_IRQ_AFFINITY */
2094