/*
 * Copyright (c) 2015-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: wlan_hdd_napi.c
 *
 * WLAN HDD NAPI interface implementation
 */
25*5113495bSYour Name #include <linux/smp.h> /* get_cpu */
26*5113495bSYour Name 
27*5113495bSYour Name #include "wlan_hdd_napi.h"
28*5113495bSYour Name #include "cds_api.h"       /* cds_get_context */
29*5113495bSYour Name #include "hif.h"           /* hif_map_service...*/
30*5113495bSYour Name #include "wlan_hdd_main.h" /* hdd_err/warn... */
31*5113495bSYour Name #include "qdf_types.h"     /* QDF_MODULE_ID_... */
32*5113495bSYour Name #include "ce_api.h"
33*5113495bSYour Name #include "wlan_dp_ucfg_api.h"
34*5113495bSYour Name 
35*5113495bSYour Name /*  guaranteed to be initialized to zero/NULL by the standard */
36*5113495bSYour Name static struct qca_napi_data *hdd_napi_ctx;
37*5113495bSYour Name 
38*5113495bSYour Name /**
39*5113495bSYour Name  * hdd_napi_get_all() - return the whole NAPI structure from HIF
40*5113495bSYour Name  *
41*5113495bSYour Name  * Gets to the data structure common to all NAPI instances.
42*5113495bSYour Name  *
43*5113495bSYour Name  * Return:
44*5113495bSYour Name  *  NULL  : probably NAPI not initialized yet.
45*5113495bSYour Name  *  <addr>: the address of the whole NAPI structure
46*5113495bSYour Name  */
hdd_napi_get_all(void)47*5113495bSYour Name struct qca_napi_data *hdd_napi_get_all(void)
48*5113495bSYour Name {
49*5113495bSYour Name 	struct qca_napi_data *rp = NULL;
50*5113495bSYour Name 	struct hif_opaque_softc *hif;
51*5113495bSYour Name 
52*5113495bSYour Name 	NAPI_DEBUG("-->");
53*5113495bSYour Name 
54*5113495bSYour Name 	hif = cds_get_context(QDF_MODULE_ID_HIF);
55*5113495bSYour Name 	if (unlikely(!hif))
56*5113495bSYour Name 		QDF_ASSERT(hif); /* WARN */
57*5113495bSYour Name 	else
58*5113495bSYour Name 		rp = hif_napi_get_all(hif);
59*5113495bSYour Name 
60*5113495bSYour Name 	NAPI_DEBUG("<-- [addr=%pK]", rp);
61*5113495bSYour Name 	return rp;
62*5113495bSYour Name }
63*5113495bSYour Name 
64*5113495bSYour Name /**
65*5113495bSYour Name  * hdd_napi_get_map() - get a copy of napi pipe map
66*5113495bSYour Name  *
67*5113495bSYour Name  * Return:
68*5113495bSYour Name  *  uint32_t  : copy of pipe map
69*5113495bSYour Name  */
hdd_napi_get_map(void)70*5113495bSYour Name static uint32_t hdd_napi_get_map(void)
71*5113495bSYour Name {
72*5113495bSYour Name 	uint32_t map = 0;
73*5113495bSYour Name 
74*5113495bSYour Name 	NAPI_DEBUG("-->");
75*5113495bSYour Name 	/* cache once, use forever */
76*5113495bSYour Name 	if (!hdd_napi_ctx)
77*5113495bSYour Name 		hdd_napi_ctx = hdd_napi_get_all();
78*5113495bSYour Name 	if (hdd_napi_ctx)
79*5113495bSYour Name 		map = hdd_napi_ctx->ce_map;
80*5113495bSYour Name 
81*5113495bSYour Name 	NAPI_DEBUG("<-- [map=0x%08x]", map);
82*5113495bSYour Name 	return map;
83*5113495bSYour Name }
84*5113495bSYour Name 
85*5113495bSYour Name /**
86*5113495bSYour Name  * hdd_napi_create() - creates the NAPI structures for a given netdev
87*5113495bSYour Name  *
88*5113495bSYour Name  * Creates NAPI instances. This function is called
89*5113495bSYour Name  * unconditionally during initialization. It creates
90*5113495bSYour Name  * napi structures through the proper HTC/HIF calls.
91*5113495bSYour Name  * The structures are disabled on creation.
92*5113495bSYour Name  *
93*5113495bSYour Name  * Return:
94*5113495bSYour Name  *   single-queue: <0: err, >0=id, 0 (should not happen)
95*5113495bSYour Name  *   multi-queue: bitmap of created instances (0: none)
96*5113495bSYour Name  */
hdd_napi_create(void)97*5113495bSYour Name int hdd_napi_create(void)
98*5113495bSYour Name {
99*5113495bSYour Name 	struct  hif_opaque_softc *hif_ctx;
100*5113495bSYour Name 	int     rc = 0;
101*5113495bSYour Name 	struct hdd_context *hdd_ctx;
102*5113495bSYour Name 	uint8_t feature_flags = 0;
103*5113495bSYour Name 	struct qca_napi_data *napid = hdd_napi_get_all();
104*5113495bSYour Name 
105*5113495bSYour Name 	NAPI_DEBUG("-->");
106*5113495bSYour Name 
107*5113495bSYour Name 	if (!napid) {
108*5113495bSYour Name 		hdd_err("unable to retrieve napi structure");
109*5113495bSYour Name 		rc = -EFAULT;
110*5113495bSYour Name 		goto exit;
111*5113495bSYour Name 	}
112*5113495bSYour Name 
113*5113495bSYour Name 	hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
114*5113495bSYour Name 	if (unlikely(!hif_ctx)) {
115*5113495bSYour Name 		QDF_ASSERT(hif_ctx);
116*5113495bSYour Name 		rc = -EFAULT;
117*5113495bSYour Name 		goto exit;
118*5113495bSYour Name 	}
119*5113495bSYour Name 
120*5113495bSYour Name 	feature_flags = QCA_NAPI_FEATURE_CPU_CORRECTION |
121*5113495bSYour Name 		QCA_NAPI_FEATURE_IRQ_BLACKLISTING |
122*5113495bSYour Name 		QCA_NAPI_FEATURE_CORE_CTL_BOOST;
123*5113495bSYour Name 
124*5113495bSYour Name 	rc = hif_napi_create(hif_ctx, hdd_napi_poll,
125*5113495bSYour Name 			     QCA_NAPI_BUDGET,
126*5113495bSYour Name 			     QCA_NAPI_DEF_SCALE,
127*5113495bSYour Name 			     feature_flags);
128*5113495bSYour Name 	if (rc < 0) {
129*5113495bSYour Name 		hdd_err("ERR(%d) creating NAPI instances",
130*5113495bSYour Name 			rc);
131*5113495bSYour Name 		goto exit;
132*5113495bSYour Name 	}
133*5113495bSYour Name 
134*5113495bSYour Name 	hdd_debug("napi instances were created. Map=0x%x", rc);
135*5113495bSYour Name 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
136*5113495bSYour Name 	if (unlikely(!hdd_ctx)) {
137*5113495bSYour Name 		QDF_ASSERT(0);
138*5113495bSYour Name 		rc = -EFAULT;
139*5113495bSYour Name 		goto exit;
140*5113495bSYour Name 	}
141*5113495bSYour Name 
142*5113495bSYour Name 	rc = hdd_napi_event(NAPI_EVT_INI_FILE,
143*5113495bSYour Name 			    (void *)ucfg_dp_get_napi_enabled(hdd_ctx->psoc));
144*5113495bSYour Name 	napid->user_cpu_affin_mask =
145*5113495bSYour Name 		hdd_ctx->config->napi_cpu_affinity_mask;
146*5113495bSYour Name 
147*5113495bSYour Name  exit:
148*5113495bSYour Name 	NAPI_DEBUG("<-- [rc=%d]", rc);
149*5113495bSYour Name 	return rc;
150*5113495bSYour Name }
151*5113495bSYour Name 
152*5113495bSYour Name /**
153*5113495bSYour Name  * hdd_napi_destroy() - destroys the NAPI structures for a given netdev
154*5113495bSYour Name  * @force: if set, will force-disable the instance before _del'ing
155*5113495bSYour Name  *
156*5113495bSYour Name  * Destroy NAPI instances. This function is called
157*5113495bSYour Name  * unconditionally during module removal. It destroy
158*5113495bSYour Name  * napi structures through the proper HTC/HIF calls.
159*5113495bSYour Name  *
160*5113495bSYour Name  * Return:
161*5113495bSYour Name  *    number of NAPI instances destroyed
162*5113495bSYour Name  */
hdd_napi_destroy(int force)163*5113495bSYour Name int hdd_napi_destroy(int force)
164*5113495bSYour Name {
165*5113495bSYour Name 	int rc = 0;
166*5113495bSYour Name 	int i;
167*5113495bSYour Name 	uint32_t hdd_napi_map = hdd_napi_get_map();
168*5113495bSYour Name 
169*5113495bSYour Name 	NAPI_DEBUG("--> (force=%d)", force);
170*5113495bSYour Name 	if (hdd_napi_map) {
171*5113495bSYour Name 		struct hif_opaque_softc *hif_ctx;
172*5113495bSYour Name 
173*5113495bSYour Name 		hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
174*5113495bSYour Name 		if (unlikely(!hif_ctx))
175*5113495bSYour Name 			QDF_ASSERT(hif_ctx);
176*5113495bSYour Name 		else
177*5113495bSYour Name 			for (i = 0; i < CE_COUNT_MAX; i++)
178*5113495bSYour Name 				if (hdd_napi_map & (0x01 << i)) {
179*5113495bSYour Name 					if (0 <= hif_napi_destroy(
180*5113495bSYour Name 						    hif_ctx,
181*5113495bSYour Name 						    NAPI_PIPE2ID(i), force)) {
182*5113495bSYour Name 						rc++;
183*5113495bSYour Name 						hdd_napi_map &= ~(0x01 << i);
184*5113495bSYour Name 					} else
185*5113495bSYour Name 						hdd_err("cannot destroy napi %d: (pipe:%d), f=%d\n",
186*5113495bSYour Name 							i,
187*5113495bSYour Name 							NAPI_PIPE2ID(i), force);
188*5113495bSYour Name 				}
189*5113495bSYour Name 	} else {
190*5113495bSYour Name 		struct hif_opaque_softc *hif_ctx;
191*5113495bSYour Name 
192*5113495bSYour Name 		hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
193*5113495bSYour Name 
194*5113495bSYour Name 		if (unlikely(!hif_ctx))
195*5113495bSYour Name 			QDF_ASSERT(hif_ctx);
196*5113495bSYour Name 		else
197*5113495bSYour Name 			rc = hif_napi_cpu_deinit(hif_ctx);
198*5113495bSYour Name 	}
199*5113495bSYour Name 
200*5113495bSYour Name 	/* if all instances are removed, it is likely that hif_context has been
201*5113495bSYour Name 	 * removed as well, so the cached value of the napi context also needs
202*5113495bSYour Name 	 * to be removed
203*5113495bSYour Name 	 */
204*5113495bSYour Name 	if (force)
205*5113495bSYour Name 		QDF_ASSERT(hdd_napi_map == 0);
206*5113495bSYour Name 	if (0 == hdd_napi_map)
207*5113495bSYour Name 		hdd_napi_ctx = NULL;
208*5113495bSYour Name 
209*5113495bSYour Name 	NAPI_DEBUG("<-- [rc=%d]", rc);
210*5113495bSYour Name 	return rc;
211*5113495bSYour Name }
212*5113495bSYour Name 
213*5113495bSYour Name /**
214*5113495bSYour Name  * hdd_napi_enabled() - checks if NAPI is enabled (for a given id)
215*5113495bSYour Name  * @id: the id of the NAPI to check (any= -1)
216*5113495bSYour Name  *
217*5113495bSYour Name  * Return:
218*5113495bSYour Name  *   int: 0  = false (NOT enabled)
219*5113495bSYour Name  *        !0 = true  (enabbled)
220*5113495bSYour Name  */
hdd_napi_enabled(int id)221*5113495bSYour Name int hdd_napi_enabled(int id)
222*5113495bSYour Name {
223*5113495bSYour Name 	struct hif_opaque_softc *hif;
224*5113495bSYour Name 	int rc = 0; /* NOT enabled */
225*5113495bSYour Name 
226*5113495bSYour Name 	hif = cds_get_context(QDF_MODULE_ID_HIF);
227*5113495bSYour Name 	if (unlikely(!hif))
228*5113495bSYour Name 		QDF_ASSERT(hif); /* WARN_ON; rc = 0 */
229*5113495bSYour Name 	else if (-1 == id)
230*5113495bSYour Name 		rc = hif_napi_enabled(hif, id);
231*5113495bSYour Name 	else
232*5113495bSYour Name 		rc = hif_napi_enabled(hif, NAPI_ID2PIPE(id));
233*5113495bSYour Name 	return rc;
234*5113495bSYour Name }
235*5113495bSYour Name 
236*5113495bSYour Name /**
237*5113495bSYour Name  * hdd_napi_event() - relay the event detected by HDD to HIF NAPI event handler
238*5113495bSYour Name  * @event: event code
239*5113495bSYour Name  * @data : event-specific auxiliary data
240*5113495bSYour Name  *
241*5113495bSYour Name  * See function documentation in hif_napi.c::hif_napi_event for list of events
242*5113495bSYour Name  * and how each of them is handled.
243*5113495bSYour Name  *
244*5113495bSYour Name  * Return:
245*5113495bSYour Name  *  < 0: error code
246*5113495bSYour Name  *  = 0: event handled successfully
247*5113495bSYour Name  */
hdd_napi_event(enum qca_napi_event event,void * data)248*5113495bSYour Name int hdd_napi_event(enum qca_napi_event event, void *data)
249*5113495bSYour Name {
250*5113495bSYour Name 	int rc = -EFAULT;  /* assume err */
251*5113495bSYour Name 	struct hif_opaque_softc *hif;
252*5113495bSYour Name 
253*5113495bSYour Name 	NAPI_DEBUG("-->(event=%d, aux=%pK)", event, data);
254*5113495bSYour Name 
255*5113495bSYour Name 	hif = cds_get_context(QDF_MODULE_ID_HIF);
256*5113495bSYour Name 	if (unlikely(!hif))
257*5113495bSYour Name 		QDF_ASSERT(hif);
258*5113495bSYour Name 	else
259*5113495bSYour Name 		rc = hif_napi_event(hif, event, data);
260*5113495bSYour Name 
261*5113495bSYour Name 	NAPI_DEBUG("<--[rc=%d]", rc);
262*5113495bSYour Name 	return rc;
263*5113495bSYour Name }
264*5113495bSYour Name 
265*5113495bSYour Name #if defined HELIUMPLUS && defined MSM_PLATFORM
266*5113495bSYour Name 
267*5113495bSYour Name static int napi_tput_policy_delay;
268*5113495bSYour Name 
269*5113495bSYour Name /**
270*5113495bSYour Name  * hdd_napi_perfd_cpufreq() - set/reset min CPU freq for cores
271*5113495bSYour Name  * @req_state:  high/low
272*5113495bSYour Name  *
273*5113495bSYour Name  * Send a message to cnss-daemon through netlink. cnss-daemon,
274*5113495bSYour Name  * in turn, sends a message to perf-daemon.
275*5113495bSYour Name  * If freq > 0, this is a set request. It sets the min frequency of the
276*5113495bSYour Name  * cores of the specified cluster to provided freq value (in KHz).
277*5113495bSYour Name  * If freq == 0, then the freq lock is removed (and frequency returns to
278*5113495bSYour Name  * system default).
279*5113495bSYour Name  *
280*5113495bSYour Name  * Semantical Alert:
281*5113495bSYour Name  * There can be at most one lock active at a time. Each "set" request must
282*5113495bSYour Name  * be followed by a "reset" request. Perfd behaviour is undefined otherwise.
283*5113495bSYour Name  *
284*5113495bSYour Name  * Return: == 0: netlink message sent to cnss-daemon
285*5113495bSYour Name  *         <  0: failure to send the message
286*5113495bSYour Name  */
hdd_napi_perfd_cpufreq(enum qca_napi_tput_state req_state)287*5113495bSYour Name static int hdd_napi_perfd_cpufreq(enum qca_napi_tput_state req_state)
288*5113495bSYour Name {
289*5113495bSYour Name 	int rc = 0;
290*5113495bSYour Name 	struct wlan_core_minfreq req;
291*5113495bSYour Name 	struct hdd_context *hdd_ctx;
292*5113495bSYour Name 
293*5113495bSYour Name 	NAPI_DEBUG("-> (%d)", req_state);
294*5113495bSYour Name 
295*5113495bSYour Name 	hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
296*5113495bSYour Name 	if (unlikely(!hdd_ctx)) {
297*5113495bSYour Name 		rc = -EFAULT;
298*5113495bSYour Name 		goto hnpc_ret;
299*5113495bSYour Name 	}
300*5113495bSYour Name 
301*5113495bSYour Name 	switch (req_state) {
302*5113495bSYour Name 	case QCA_NAPI_TPUT_LO:
303*5113495bSYour Name 		req.magic    = WLAN_CORE_MINFREQ_MAGIC;
304*5113495bSYour Name 		req.reserved = 0; /* unused */
305*5113495bSYour Name 		req.coremask = 0; /* not valid */
306*5113495bSYour Name 		req.freq     = 0; /* reset */
307*5113495bSYour Name 		break;
308*5113495bSYour Name 	case QCA_NAPI_TPUT_HI:
309*5113495bSYour Name 		req.magic    = WLAN_CORE_MINFREQ_MAGIC;
310*5113495bSYour Name 		req.reserved = 0; /* unused */
311*5113495bSYour Name 		req.coremask = 0x0f0; /* perf cluster */
312*5113495bSYour Name 		req.freq     = 700;   /* KHz */
313*5113495bSYour Name 		break;
314*5113495bSYour Name 	default:
315*5113495bSYour Name 		hdd_err("invalid req_state (%d)", req_state);
316*5113495bSYour Name 		rc = -EINVAL;
317*5113495bSYour Name 		goto hnpc_ret;
318*5113495bSYour Name 	} /* switch */
319*5113495bSYour Name 
320*5113495bSYour Name 	NAPI_DEBUG("CPU min freq to %d",
321*5113495bSYour Name 		   (req.freq == 0)?"Resetting":"Setting", req.freq);
322*5113495bSYour Name 	/* the following service function returns void */
323*5113495bSYour Name 	wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index,
324*5113495bSYour Name 				WLAN_SVC_CORE_MINFREQ,
325*5113495bSYour Name 				&req, sizeof(struct wlan_core_minfreq));
326*5113495bSYour Name hnpc_ret:
327*5113495bSYour Name 	NAPI_DEBUG("<--[rc=%d]", rc);
328*5113495bSYour Name 	return rc;
329*5113495bSYour Name }
330*5113495bSYour Name 
331*5113495bSYour Name /**
332*5113495bSYour Name  * hdd_napi_apply_throughput_policy() - implement the throughput action policy
333*5113495bSYour Name  * @hddctx:     HDD context
334*5113495bSYour Name  * @tx_packets: number of tx packets in the last interval
335*5113495bSYour Name  * @rx_packets: number of rx packets in the last interval
336*5113495bSYour Name  *
337*5113495bSYour Name  * Called by hdd_bus_bw_compute_cb, checks the number of packets in the last
338*5113495bSYour Name  * interval, and determines the desired napi throughput state (HI/LO). If
339*5113495bSYour Name  * the desired state is different from the current, then it invokes the
340*5113495bSYour Name  * event handler to switch to the desired state.
341*5113495bSYour Name  *
342*5113495bSYour Name  * The policy implementation is limited to this function and
343*5113495bSYour Name  * The current policy is: determine the NAPI mode based on the condition:
344*5113495bSYour Name  *      (total number of packets > medium threshold)
345*5113495bSYour Name  * - tx packets are included because:
346*5113495bSYour Name  *   a- tx-completions arrive at one of the rx CEs
347*5113495bSYour Name  *   b- in TCP, a lof of TX implies ~(tx/2) rx (ACKs)
348*5113495bSYour Name  *   c- so that we can use the same normalized criteria in ini file
349*5113495bSYour Name  * - medium-threshold (default: 500 packets / 10 ms), because
350*5113495bSYour Name  *   we would like to be more reactive.
351*5113495bSYour Name  *
352*5113495bSYour Name  * Return: 0 : no action taken, or action return code
353*5113495bSYour Name  *         !0: error, or action error code
354*5113495bSYour Name  */
hdd_napi_apply_throughput_policy(struct hdd_context * hddctx,uint64_t tx_packets,uint64_t rx_packets)355*5113495bSYour Name int hdd_napi_apply_throughput_policy(struct hdd_context *hddctx,
356*5113495bSYour Name 				     uint64_t tx_packets,
357*5113495bSYour Name 				     uint64_t rx_packets)
358*5113495bSYour Name {
359*5113495bSYour Name 	int rc = 0;
360*5113495bSYour Name 	uint64_t packets = tx_packets + rx_packets;
361*5113495bSYour Name 	enum qca_napi_tput_state req_state;
362*5113495bSYour Name 	struct qca_napi_data *napid = hdd_napi_get_all();
363*5113495bSYour Name 	int enabled;
364*5113495bSYour Name 
365*5113495bSYour Name 	NAPI_DEBUG("-->(tx=%lld, rx=%lld)", tx_packets, rx_packets);
366*5113495bSYour Name 
367*5113495bSYour Name 	if (unlikely(napi_tput_policy_delay < 0))
368*5113495bSYour Name 		napi_tput_policy_delay = 0;
369*5113495bSYour Name 	if (napi_tput_policy_delay > 0) {
370*5113495bSYour Name 		NAPI_DEBUG("delaying policy; delay-count=%d",
371*5113495bSYour Name 			   napi_tput_policy_delay);
372*5113495bSYour Name 		napi_tput_policy_delay--;
373*5113495bSYour Name 
374*5113495bSYour Name 		/* make sure the next timer call calls us */
375*5113495bSYour Name 		ucfg_dp_set_current_throughput_level(hddctx->psoc, -1);
376*5113495bSYour Name 
377*5113495bSYour Name 		return rc;
378*5113495bSYour Name 	}
379*5113495bSYour Name 
380*5113495bSYour Name 	if (!napid) {
381*5113495bSYour Name 		hdd_err("ERR: napid NULL");
382*5113495bSYour Name 		return rc;
383*5113495bSYour Name 	}
384*5113495bSYour Name 
385*5113495bSYour Name 	enabled = hdd_napi_enabled(HDD_NAPI_ANY);
386*5113495bSYour Name 	if (!enabled) {
387*5113495bSYour Name 		hdd_err("ERR: napi not enabled");
388*5113495bSYour Name 		return rc;
389*5113495bSYour Name 	}
390*5113495bSYour Name 
391*5113495bSYour Name 	if (packets > ucfg_dp_get_bus_bw_high_threshold(hddctx->psoc))
392*5113495bSYour Name 		req_state = QCA_NAPI_TPUT_HI;
393*5113495bSYour Name 	else
394*5113495bSYour Name 		req_state = QCA_NAPI_TPUT_LO;
395*5113495bSYour Name 
396*5113495bSYour Name 	if (req_state != napid->napi_mode) {
397*5113495bSYour Name 		/* [re]set the floor frequency of high cluster */
398*5113495bSYour Name 		rc = hdd_napi_perfd_cpufreq(req_state);
399*5113495bSYour Name 		/* denylist/boost_mode on/off */
400*5113495bSYour Name 		rc = hdd_napi_event(NAPI_EVT_TPUT_STATE, (void *)req_state);
401*5113495bSYour Name 	}
402*5113495bSYour Name 	return rc;
403*5113495bSYour Name }
404*5113495bSYour Name 
405*5113495bSYour Name /**
406*5113495bSYour Name  * hdd_napi_serialize() - serialize all NAPI activities
407*5113495bSYour Name  * @is_on: 1="serialize" or 0="de-serialize"
408*5113495bSYour Name  *
409*5113495bSYour Name  * Start/stop "serial-NAPI-mode".
410*5113495bSYour Name  * NAPI serial mode describes a state where all NAPI operations are forced to be
411*5113495bSYour Name  * run serially. This is achieved by ensuring all NAPI instances are run on the
412*5113495bSYour Name  * same CPU, so forced to be serial.
413*5113495bSYour Name  * NAPI life-cycle:
414*5113495bSYour Name  * - Interrupt is received for a given CE.
415*5113495bSYour Name  * - In the ISR, the interrupt is masked and corresponding NAPI instance
416*5113495bSYour Name  *   is scheduled, to be run as a bottom-half.
417*5113495bSYour Name  * - Bottom-half starts with a poll call (by the net_rx softirq). There may be
418*5113495bSYour Name  *   one of more subsequent calls until the work is complete.
419*5113495bSYour Name  * - Once the work is complete, the poll handler enables the interrupt and
420*5113495bSYour Name  *   the cycle re-starts.
421*5113495bSYour Name  *
422*5113495bSYour Name  * Return: <0: error-code (operation failed)
423*5113495bSYour Name  *         =0: success
424*5113495bSYour Name  *         >0: status (not used)
425*5113495bSYour Name  */
hdd_napi_serialize(int is_on)426*5113495bSYour Name int hdd_napi_serialize(int is_on)
427*5113495bSYour Name {
428*5113495bSYour Name 	int rc;
429*5113495bSYour Name 	struct hdd_context *hdd_ctx;
430*5113495bSYour Name #define POLICY_DELAY_FACTOR (1)
431*5113495bSYour Name 	rc = hif_napi_serialize(cds_get_context(QDF_MODULE_ID_HIF), is_on);
432*5113495bSYour Name 	if ((rc == 0) && (is_on == 0)) {
433*5113495bSYour Name 		/* apply throughput policy after one timeout */
434*5113495bSYour Name 		napi_tput_policy_delay = POLICY_DELAY_FACTOR;
435*5113495bSYour Name 
436*5113495bSYour Name 		/* make sure that bus_bandwidth trigger is executed */
437*5113495bSYour Name 		hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
438*5113495bSYour Name 		if (hdd_ctx)
439*5113495bSYour Name 			ucfg_dp_set_current_throughput_level(hdd_ctx->psoc,
440*5113495bSYour Name 							     -1);
441*5113495bSYour Name 
442*5113495bSYour Name 	}
443*5113495bSYour Name 	return rc;
444*5113495bSYour Name }
445*5113495bSYour Name #endif /* HELIUMPLUS && MSM_PLATFORM */
446*5113495bSYour Name 
447*5113495bSYour Name /**
448*5113495bSYour Name  * hdd_napi_poll() - NAPI poll function
449*5113495bSYour Name  * @napi  : pointer to NAPI struct
450*5113495bSYour Name  * @budget: the pre-declared budget
451*5113495bSYour Name  *
452*5113495bSYour Name  * Implementation of poll function. This function is called
453*5113495bSYour Name  * by kernel during softirq processing.
454*5113495bSYour Name  *
455*5113495bSYour Name  * NOTE FOR THE MAINTAINER:
456*5113495bSYour Name  *   Make sure this is very close to the ce_tasklet code.
457*5113495bSYour Name  *
458*5113495bSYour Name  * Return:
459*5113495bSYour Name  *   int: the amount of work done ( <= budget )
460*5113495bSYour Name  */
hdd_napi_poll(struct napi_struct * napi,int budget)461*5113495bSYour Name int hdd_napi_poll(struct napi_struct *napi, int budget)
462*5113495bSYour Name {
463*5113495bSYour Name 	return hif_napi_poll(cds_get_context(QDF_MODULE_ID_HIF), napi, budget);
464*5113495bSYour Name }
465*5113495bSYour Name 
466*5113495bSYour Name /**
467*5113495bSYour Name  * hdd_display_napi_stats() - print NAPI stats
468*5113495bSYour Name  *
469*5113495bSYour Name  * Return: == 0: success; !=0: failure
470*5113495bSYour Name  */
hdd_display_napi_stats(void)471*5113495bSYour Name int hdd_display_napi_stats(void)
472*5113495bSYour Name {
473*5113495bSYour Name 	int i, j, k, n; /* NAPI, CPU, bucket indices, bucket buf write index*/
474*5113495bSYour Name 	int max;
475*5113495bSYour Name 	struct qca_napi_data *napid;
476*5113495bSYour Name 	struct qca_napi_info *napii;
477*5113495bSYour Name 	struct qca_napi_stat *napis;
478*5113495bSYour Name 	/*
479*5113495bSYour Name 	 * Expecting each NAPI bucket item to need at max 5 numerals + space for
480*5113495bSYour Name 	 * formatting. For example "10000 " Thus the array needs to have
481*5113495bSYour Name 	 * (5 + 1) * QCA_NAPI_NUM_BUCKETS bytes of space. Leaving one space at
482*5113495bSYour Name 	 * the end of the "buf" array for end of string char.
483*5113495bSYour Name 	 */
484*5113495bSYour Name 	char buf[6 * QCA_NAPI_NUM_BUCKETS + 1] = {'\0'};
485*5113495bSYour Name 
486*5113495bSYour Name 	napid = hdd_napi_get_all();
487*5113495bSYour Name 	if (!napid) {
488*5113495bSYour Name 		hdd_err("unable to retrieve napi structure");
489*5113495bSYour Name 		return -EFAULT;
490*5113495bSYour Name 	}
491*5113495bSYour Name 	hdd_nofl_info("[NAPI %u][BL %d]:  scheds   polls   comps    done t-lim p-lim  corr  max_time napi-buckets(%d)",
492*5113495bSYour Name 		      napid->napi_mode,
493*5113495bSYour Name 		      hif_napi_cpu_denylist(napid, DENYLIST_QUERY),
494*5113495bSYour Name 		      QCA_NAPI_NUM_BUCKETS);
495*5113495bSYour Name 
496*5113495bSYour Name 	for (i = 0; i < CE_COUNT_MAX; i++)
497*5113495bSYour Name 		if (napid->ce_map & (0x01 << i)) {
498*5113495bSYour Name 			napii = napid->napis[i];
499*5113495bSYour Name 			if (!napii)
500*5113495bSYour Name 				continue;
501*5113495bSYour Name 
502*5113495bSYour Name 			for (j = 0; j < num_possible_cpus(); j++) {
503*5113495bSYour Name 				napis = &(napii->stats[j]);
504*5113495bSYour Name 				n = 0;
505*5113495bSYour Name 				max = sizeof(buf);
506*5113495bSYour Name 				for (k = 0; k < QCA_NAPI_NUM_BUCKETS; k++) {
507*5113495bSYour Name 					n += scnprintf(
508*5113495bSYour Name 						buf + n, max - n,
509*5113495bSYour Name 						" %d",
510*5113495bSYour Name 						napis->napi_budget_uses[k]);
511*5113495bSYour Name 				}
512*5113495bSYour Name 
513*5113495bSYour Name 				if (napis->napi_schedules != 0)
514*5113495bSYour Name 					hdd_nofl_info("NAPI[%2d]CPU[%d]: %7d %7d %7d %7d %5d %5d %5d %9llu %s",
515*5113495bSYour Name 						      i, j,
516*5113495bSYour Name 						      napis->napi_schedules,
517*5113495bSYour Name 						      napis->napi_polls,
518*5113495bSYour Name 						      napis->napi_completes,
519*5113495bSYour Name 						      napis->napi_workdone,
520*5113495bSYour Name 						      napis->time_limit_reached,
521*5113495bSYour Name 						      napis->
522*5113495bSYour Name 							rxpkt_thresh_reached,
523*5113495bSYour Name 						      napis->cpu_corrected,
524*5113495bSYour Name 						      napis->napi_max_poll_time,
525*5113495bSYour Name 						      buf);
526*5113495bSYour Name 			}
527*5113495bSYour Name 		}
528*5113495bSYour Name 
529*5113495bSYour Name 	hif_napi_stats(napid);
530*5113495bSYour Name 	return 0;
531*5113495bSYour Name }
532*5113495bSYour Name 
533*5113495bSYour Name /**
534*5113495bSYour Name  * hdd_clear_napi_stats() - clear NAPI stats
535*5113495bSYour Name  *
536*5113495bSYour Name  * Return: == 0: success; !=0: failure
537*5113495bSYour Name  */
hdd_clear_napi_stats(void)538*5113495bSYour Name int hdd_clear_napi_stats(void)
539*5113495bSYour Name {
540*5113495bSYour Name 	int i, j;
541*5113495bSYour Name 	struct qca_napi_data *napid;
542*5113495bSYour Name 	struct qca_napi_info *napii;
543*5113495bSYour Name 	struct qca_napi_stat *napis;
544*5113495bSYour Name 
545*5113495bSYour Name 	napid = hdd_napi_get_all();
546*5113495bSYour Name 	if (!napid) {
547*5113495bSYour Name 		hdd_err("unable to retrieve napi structure");
548*5113495bSYour Name 		return -EFAULT;
549*5113495bSYour Name 	}
550*5113495bSYour Name 
551*5113495bSYour Name 	for (i = 0; i < CE_COUNT_MAX; i++)
552*5113495bSYour Name 		if (napid->ce_map & (0x01 << i)) {
553*5113495bSYour Name 			napii = napid->napis[i];
554*5113495bSYour Name 			for (j = 0; j < NR_CPUS; j++) {
555*5113495bSYour Name 				napis = &(napii->stats[j]);
556*5113495bSYour Name 				qdf_mem_zero(napis,
557*5113495bSYour Name 					     sizeof(struct qca_napi_stat));
558*5113495bSYour Name 			}
559*5113495bSYour Name 		}
560*5113495bSYour Name 
561*5113495bSYour Name 	return 0;
562*5113495bSYour Name }
563