/*
 * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
19
20 #ifndef __HIF_NAPI_H__
21 #define __HIF_NAPI_H__
22
23 /**
24 * DOC: hif_napi.h
25 *
26 * Interface to HIF implemented functions of NAPI.
27 * These are used by hdd_napi.
28 */
29
30
31 /* CLD headers */
32 #include <hif.h> /* struct hif_opaque_softc; */
33
34 /*
35 * common stuff
36 * The declarations until #ifdef FEATURE_NAPI below
37 * are valid whether or not FEATURE_NAPI has been
38 * defined.
39 */
40
/**
 * enum qca_napi_event - NAPI Events
 * @NAPI_EVT_INVALID: invalid event
 * @NAPI_EVT_INI_FILE: ini file processed
 * @NAPI_EVT_CMD_STATE: userspace command
 * @NAPI_EVT_INT_STATE: internal event
 * @NAPI_EVT_CPU_STATE: CPU hotplug events
 * @NAPI_EVT_TPUT_STATE: throughput triggers
 * @NAPI_EVT_USR_SERIAL: WMA/Roaming Start
 * @NAPI_EVT_USR_NORMAL: WMA/Roaming End
 *
 * NAPI manages the following states:
 * NAPI state: per NAPI instance, ENABLED/DISABLED
 * CPU  state: per CPU,           DOWN/UP
 * TPUT state: global,            LOW/HI
 *
 * "Dynamic" changes to state of various NAPI structures are
 * managed by NAPI events. The events may be produced by
 * various detection points. With each event, some data is
 * sent. The main event handler in hif_napi handles and makes
 * the state changes.
 *
 * event          : data             : generated
 * ---------------:------------------:------------------
 * EVT_INI_FILE   : cfg->napi_enable : after ini file processed
 * EVT_CMD_STATE  : cmd arg          : by the vendor cmd
 * EVT_INT_STATE  : 0                : internal - shut off/disable
 * EVT_CPU_STATE  : (cpu << 16)|state: CPU hotplug events
 * EVT_TPUT_STATE : (high/low)       : tput trigger
 * EVT_USR_SERIAL : num-serial_calls : WMA/ROAMING-START/IND
 * EVT_USR_NORMAL : N/A              : WMA/ROAMING-END
 */
enum qca_napi_event {
	NAPI_EVT_INVALID,
	NAPI_EVT_INI_FILE,
	NAPI_EVT_CMD_STATE,
	NAPI_EVT_INT_STATE,
	NAPI_EVT_CPU_STATE,
	NAPI_EVT_TPUT_STATE,
	NAPI_EVT_USR_SERIAL,
	NAPI_EVT_USR_NORMAL
};
83
/*
 * Following are some of NAPI related features controlled using feature flag
 * These flags need to be enabled in the qca_napi_data->flags variable for the
 * feature to kick in.
 * QCA_NAPI_FEATURE_CPU_CORRECTION   - controls CPU correction logic
 * QCA_NAPI_FEATURE_IRQ_BLACKLISTING - controls call to irq_denylist_on API
 * QCA_NAPI_FEATURE_CORE_CTL_BOOST   - controls call to core_ctl_set_boost API
 */
#define QCA_NAPI_FEATURE_CPU_CORRECTION   BIT(1)
#define QCA_NAPI_FEATURE_IRQ_BLACKLISTING BIT(2)
#define QCA_NAPI_FEATURE_CORE_CTL_BOOST   BIT(3)

/*
 * Macros to map ids -returned by ...create()- to pipes and vice versa
 */
#define NAPI_ID2PIPE(i) ((i) - 1)
#define NAPI_PIPE2ID(p) ((p) + 1)
101
#ifdef RECEIVE_OFFLOAD
/**
 * hif_napi_rx_offld_flush_cb_register() - Register flush callback for Rx offld
 * @hif_hdl: pointer to hif context
 * @rx_ol_flush_handler: register offld flush callback
 *
 * Return: None
 */
void hif_napi_rx_offld_flush_cb_register(struct hif_opaque_softc *hif_hdl,
					 void (rx_ol_flush_handler)(void *arg));

/**
 * hif_napi_rx_offld_flush_cb_deregister() - Deregister offld flush_cb
 * @hif_hdl: pointer to hif context
 *
 * Return: NONE
 */
void hif_napi_rx_offld_flush_cb_deregister(struct hif_opaque_softc *hif_hdl);
#endif /* RECEIVE_OFFLOAD */
121
/**
 * hif_napi_get_lro_info() - returns the address LRO data for napi_id
 * @hif_hdl: pointer to hif context
 * @napi_id: napi instance
 *
 * Description:
 *   Returns the address of the LRO structure
 *
 * Return:
 *  <addr>: address of the LRO structure
 */
void *hif_napi_get_lro_info(struct hif_opaque_softc *hif_hdl, int napi_id);

/**
 * enum qca_denylist_op - denylist operations on NAPI IRQs
 * @DENYLIST_QUERY: query the current denylist state
 * @DENYLIST_OFF: take IRQs off the denylist
 * @DENYLIST_ON: put IRQs on the denylist
 */
enum qca_denylist_op {
	DENYLIST_QUERY,
	DENYLIST_OFF,
	DENYLIST_ON
};
140
141 #ifdef FEATURE_NAPI
142
/*
 * NAPI HIF API
 *
 * the declarations below only apply to the case
 * where FEATURE_NAPI is defined
 */

/* Creates NAPI instances; the returned id maps to a pipe via NAPI_ID2PIPE() */
int hif_napi_create(struct hif_opaque_softc *hif,
		    int (*poll)(struct napi_struct *, int),
		    int budget,
		    int scale,
		    uint8_t flags);
int hif_napi_destroy(struct hif_opaque_softc *hif,
		     uint8_t id,
		     int force);

struct qca_napi_data *hif_napi_get_all(struct hif_opaque_softc *hif);

/**
 * hif_get_napi() - get NAPI corresponding to napi_id
 * @napi_id: NAPI instance
 * @napid: Handle NAPI
 *
 * Return: napi corresponding napi_id
 */
struct qca_napi_info *hif_get_napi(int napi_id, struct qca_napi_data *napid);

/* Main NAPI event handler; see enum qca_napi_event for event/data pairs */
int hif_napi_event(struct hif_opaque_softc *hif,
		   enum qca_napi_event event,
		   void *data);

/* called from the ISR within hif, so, ce is known */
int hif_napi_enabled(struct hif_opaque_softc *hif, int ce);

bool hif_napi_created(struct hif_opaque_softc *hif, int ce);

/* called from hdd (napi_poll), using napi id as a selector */
void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id);

/* called by ce_tasklet.c::ce_dispatch_interrupt */
bool hif_napi_schedule(struct hif_opaque_softc *scn, int ce_id);

/* called by hdd_napi, which is called by kernel */
int hif_napi_poll(struct hif_opaque_softc *hif_ctx,
		  struct napi_struct *napi, int budget);

#ifdef FEATURE_NAPI_DEBUG
#define NAPI_DEBUG(fmt, ...) \
	qdf_debug("wlan: NAPI: %s:%d "fmt, __func__, __LINE__, ##__VA_ARGS__)
#else
#define NAPI_DEBUG(fmt, ...) /* NO-OP */
#endif /* FEATURE_NAPI_DEBUG */

/* CPU-affinity management constants (see hif_napi_cpu_migrate()) */
#define HNC_ANY_CPU (-1)
#define HNC_ACT_RELOCATE (0)
#define HNC_ACT_COLLAPSE (1)
#define HNC_ACT_DISPERSE (-1)

/**
 * hif_update_napi_max_poll_time() - updates NAPI max poll time
 * @ce_state: ce state
 * @ce_id: Copy engine ID
 * @cpu_id: cpu id
 *
 * This API updates NAPI max poll time per CE per CPU.
 *
 * Return: void
 */
void hif_update_napi_max_poll_time(struct CE_state *ce_state,
				   int ce_id,
				   int cpu_id);
/*
 * Local interface to HIF implemented functions of NAPI CPU affinity management.
 * Note:
 * 1- The symbols in this file are NOT supposed to be used by any
 *    entity other than hif_napi.c
 * 2- The symbols are valid only if HELIUMPLUS is defined. They are otherwise
 *    mere wrappers.
 *
 */
223
224 #else /* ! defined(FEATURE_NAPI) */
225
226 /*
227 * Stub API
228 *
229 * The declarations in this section are valid only
230 * when FEATURE_NAPI has *not* been defined.
231 */
232
233 #define NAPI_DEBUG(fmt, ...) /* NO-OP */
234
hif_napi_create(struct hif_opaque_softc * hif,uint8_t pipe_id,int (* poll)(struct napi_struct *,int),int budget,int scale,uint8_t flags)235 static inline int hif_napi_create(struct hif_opaque_softc *hif,
236 uint8_t pipe_id,
237 int (*poll)(struct napi_struct *, int),
238 int budget,
239 int scale,
240 uint8_t flags)
241 { return -EPERM; }
242
hif_napi_destroy(struct hif_opaque_softc * hif,uint8_t id,int force)243 static inline int hif_napi_destroy(struct hif_opaque_softc *hif,
244 uint8_t id,
245 int force)
246 { return -EPERM; }
247
hif_napi_get_all(struct hif_opaque_softc * hif)248 static inline struct qca_napi_data *hif_napi_get_all(
249 struct hif_opaque_softc *hif)
250 { return NULL; }
251
hif_get_napi(int napi_id,struct qca_napi_data * napid)252 static inline struct qca_napi_info *hif_get_napi(int napi_id,
253 struct qca_napi_data *napid)
254 { return NULL; }
255
hif_napi_event(struct hif_opaque_softc * hif,enum qca_napi_event event,void * data)256 static inline int hif_napi_event(struct hif_opaque_softc *hif,
257 enum qca_napi_event event,
258 void *data)
259 { return -EPERM; }
260
261 /* called from the ISR within hif, so, ce is known */
hif_napi_enabled(struct hif_opaque_softc * hif,int ce)262 static inline int hif_napi_enabled(struct hif_opaque_softc *hif, int ce)
263 { return 0; }
264
hif_napi_created(struct hif_opaque_softc * hif,int ce)265 static inline bool hif_napi_created(struct hif_opaque_softc *hif, int ce)
266 { return false; }
267
268 /* called from hdd (napi_poll), using napi id as a selector */
hif_napi_enable_irq(struct hif_opaque_softc * hif,int id)269 static inline void hif_napi_enable_irq(struct hif_opaque_softc *hif, int id)
270 { return; }
271
hif_napi_schedule(struct hif_opaque_softc * hif,int ce_id)272 static inline bool hif_napi_schedule(struct hif_opaque_softc *hif, int ce_id)
273 { return false; }
274
hif_napi_poll(struct napi_struct * napi,int budget)275 static inline int hif_napi_poll(struct napi_struct *napi, int budget)
276 { return -EPERM; }
277
278 /**
279 * hif_update_napi_max_poll_time() - updates NAPI max poll time
280 * @ce_state: ce state
281 * @ce_id: Copy engine ID
282 * @cpu_id: cpu id
283 *
284 * This API updates NAPI max poll time per CE per SPU.
285 *
286 * Return: void
287 */
hif_update_napi_max_poll_time(struct CE_state * ce_state,int ce_id,int cpu_id)288 static inline void hif_update_napi_max_poll_time(struct CE_state *ce_state,
289 int ce_id,
290 int cpu_id)
291 { return; }
292 #endif /* FEATURE_NAPI */
293
294 #if defined(HIF_IRQ_AFFINITY) && defined(FEATURE_NAPI)
/*
 * prototype signatures
 */
int hif_napi_cpu_init(struct hif_opaque_softc *hif);
int hif_napi_cpu_deinit(struct hif_opaque_softc *hif);

int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu, int action);
int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on);

int hif_napi_cpu_denylist(struct qca_napi_data *napid,
			  enum qca_denylist_op op);

/* not directly related to irq affinity, but oh well */
void hif_napi_stats(struct qca_napi_data *napid);
void hif_napi_update_yield_stats(struct CE_state *ce_state,
				 bool time_limit_reached,
				 bool rxpkt_thresh_reached);
312 #else
313 struct qca_napi_data;
hif_napi_cpu_init(struct hif_opaque_softc * hif)314 static inline int hif_napi_cpu_init(struct hif_opaque_softc *hif)
315 { return 0; }
316
hif_napi_cpu_deinit(struct hif_opaque_softc * hif)317 static inline int hif_napi_cpu_deinit(struct hif_opaque_softc *hif)
318 { return 0; }
319
hif_napi_cpu_migrate(struct qca_napi_data * napid,int cpu,int action)320 static inline int hif_napi_cpu_migrate(struct qca_napi_data *napid, int cpu,
321 int action)
322 { return 0; }
323
hif_napi_serialize(struct hif_opaque_softc * hif,int is_on)324 static inline int hif_napi_serialize(struct hif_opaque_softc *hif, int is_on)
325 { return -EPERM; }
326
hif_napi_stats(struct qca_napi_data * napid)327 static inline void hif_napi_stats(struct qca_napi_data *napid) { }
hif_napi_update_yield_stats(struct CE_state * ce_state,bool time_limit_reached,bool rxpkt_thresh_reached)328 static inline void hif_napi_update_yield_stats(struct CE_state *ce_state,
329 bool time_limit_reached,
330 bool rxpkt_thresh_reached) { }
331
hif_napi_cpu_denylist(struct qca_napi_data * napid,enum qca_denylist_op op)332 static inline int hif_napi_cpu_denylist(struct qca_napi_data *napid,
333 enum qca_denylist_op op)
334 { return 0; }
335 #endif /* HIF_IRQ_AFFINITY */
336
337 #endif /* __HIF_NAPI_H__ */
338