1 /*
2 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #ifndef __HIF_EXEC_H__
21 #define __HIF_EXEC_H__
22
23 #include <hif.h>
24 #include <hif_irq_affinity.h>
25 #include <linux/cpumask.h>
26
27 #ifndef IRQ_DISABLED_MAX_DURATION_NS
28 #define IRQ_DISABLED_MAX_DURATION_NS 100000000
29 #endif
30
31 struct hif_exec_context;
32
/**
 * struct hif_execution_ops - scheduling operations for an execution context
 * @context_type: printable name of the context type, for debugging
 * @schedule: queue the context for execution
 * @reschedule: queue the context again after it yielded
 * @kill: stop the context and prevent further scheduling
 */
struct hif_execution_ops {
	char *context_type;
	void (*schedule)(struct hif_exec_context *);
	void (*reschedule)(struct hif_exec_context *);
	void (*kill)(struct hif_exec_context *);
};
39
40 /**
41 * struct hif_exec_context - only ever allocated as a subtype eg.
42 * hif_tasklet_exec_context
43 *
 * @sched_ops: HIF execution ops
45 * @hif: HIF context
46 * @numirq: number of IRQs
47 * @irq: irq handle corresponding to hw block
48 * @os_irq: irq handle for irq_affinity
 * @cpumask: mask of CPUs this context's IRQs are affined to
 * @grp_id: id of this interrupt group / execution context
 * @scale_bin_shift: bin shift used to scale scheduling latency stats
52 * @context_name: a pointer to a const string for debugging.
53 * this should help whenever there could be ambiguity
54 * in what type of context the void* context points to
55 * @context: context for the handler function to use.
56 * @handler: interrupt handler
57 * @work_complete: Function call called when leaving the execution context to
58 * determine if this context should reschedule or wait for an interrupt.
59 * This function may be used as a hook for post processing.
60 * @irq_enable: called when the context leaves polling mode
61 * @irq_disable: called before scheduling the context.
62 * @irq_name: pointer to function to return irq name/string mapped to irq number
63 * @sched_latency_stats: schedule latency stats for different latency buckets
64 * @tstamp: timestamp when napi poll happens
65 * @cpu: the cpu this context should be affined to
 * @stats: per-CPU napi statistics for this context
 * @inited: tracks whether the context has been initialized
 * @configured: tracks whether the context has been configured
 * @irq_requested: tracks whether the group IRQs have been requested
 * @irq_enabled: tracks whether the group IRQs are currently enabled
71 * @irq_lock: spinlock used while enabling/disabling IRQs
72 * @type: type of execution context
73 * @poll_start_time: hif napi poll start time in nanoseconds
74 * @force_break: flag to indicate if HIF execution context was forced to return
75 * to HIF. This means there is more work to be done. Hence do not
76 * call napi_complete.
77 * @new_cpu_mask: Stores the affinity hint mask for each WLAN IRQ
78 * @force_napi_complete: do a force napi_complete when this flag is set to -1
79 * @irq_disabled_start_time: irq disabled start time for single MSI
80 */
/* Members are documented in the kernel-doc comment above. */
struct hif_exec_context {
	/* scheduling ops and owning HIF instance */
	struct hif_execution_ops *sched_ops;
	struct hif_opaque_softc *hif;
	/* hardware IRQ bookkeeping */
	uint32_t numirq;
	uint32_t irq[HIF_MAX_GRP_IRQ];
	uint32_t os_irq[HIF_MAX_GRP_IRQ];
	cpumask_t cpumask;
	uint32_t grp_id;
	uint32_t scale_bin_shift;
	const char *context_name;
	void *context;
	ext_intr_handler handler;

	/* callbacks invoked around each execution pass */
	bool (*work_complete)(struct hif_exec_context *, int work_done);
	void (*irq_enable)(struct hif_exec_context *);
	void (*irq_disable)(struct hif_exec_context *);
	const char* (*irq_name)(int irq_no);
	uint64_t sched_latency_stats[HIF_SCHED_LATENCY_BUCKETS];
	uint64_t tstamp;

	/* runtime state and statistics */
	uint8_t cpu;
	struct qca_napi_stat stats[NR_CPUS];
	bool inited;
	bool configured;
	bool irq_requested;
	bool irq_enabled;
	qdf_spinlock_t irq_lock;
	enum hif_exec_type type;
	unsigned long long poll_start_time;
	bool force_break;
	/* present only when some form of IRQ affinity management is built */
#if defined(FEATURE_IRQ_AFFINITY) || defined(HIF_CPU_PERF_AFFINE_MASK) || \
	defined(HIF_CPU_CLEAR_AFFINITY)
	qdf_cpu_mask new_cpu_mask[HIF_MAX_GRP_IRQ];
#endif
#ifdef FEATURE_IRQ_AFFINITY
	qdf_atomic_t force_napi_complete;
#endif
	unsigned long long irq_disabled_start_time;
};
120
121 /**
122 * struct hif_tasklet_exec_context - exec_context for tasklets
123 * @exec_ctx: inherited data type
124 * @tasklet: tasklet structure for scheduling
125 */
struct hif_tasklet_exec_context {
	/* must remain the first member: hif_exec_get_tasklet() casts the
	 * base hif_exec_context pointer to this subtype
	 */
	struct hif_exec_context exec_ctx;
	struct tasklet_struct tasklet;
};
130
131 /**
132 * struct hif_napi_exec_context - exec_context for NAPI
133 * @exec_ctx: inherited data type
134 * @netdev: dummy net device associated with the napi context
135 * @napi: napi structure used in scheduling
136 */
struct hif_napi_exec_context {
	/* must remain the first member: hif_exec_get_napi() casts the
	 * base hif_exec_context pointer to this subtype
	 */
	struct hif_exec_context exec_ctx;
	struct net_device netdev; /* dummy net_dev */
	struct napi_struct napi;
};
142
/**
 * hif_exec_get_napi() - downcast a generic exec context to its NAPI subtype
 * @ctx: base execution context; must be embedded as the first member of a
 *       struct hif_napi_exec_context for the cast to be valid
 *
 * Return: the enclosing NAPI execution context
 */
static inline struct hif_napi_exec_context*
hif_exec_get_napi(struct hif_exec_context *ctx)
{
	struct hif_napi_exec_context *napi_ctx;

	napi_ctx = (struct hif_napi_exec_context *)ctx;

	return napi_ctx;
}
148
/**
 * hif_exec_get_tasklet() - downcast a generic exec context to its tasklet
 *                          subtype
 * @ctx: base execution context; must be embedded as the first member of a
 *       struct hif_tasklet_exec_context for the cast to be valid
 *
 * Return: the enclosing tasklet execution context
 */
static inline struct hif_tasklet_exec_context*
hif_exec_get_tasklet(struct hif_exec_context *ctx)
{
	struct hif_tasklet_exec_context *tasklet_ctx;

	tasklet_ctx = (struct hif_tasklet_exec_context *)ctx;

	return tasklet_ctx;
}
154
/**
 * hif_exec_create() - create an execution context of the given type
 * @type: type of execution context to create
 * @scale: scale bin shift for scheduling latency stats
 *         (presumably stored as scale_bin_shift — confirm in implementation)
 *
 * Return: pointer to the new context, or NULL on failure
 */
struct hif_exec_context *hif_exec_create(enum hif_exec_type type,
						uint32_t scale);

/**
 * hif_exec_destroy() - free an execution context
 * @ctx: context to destroy
 *
 * Return: none
 */
void hif_exec_destroy(struct hif_exec_context *ctx);

/**
 * hif_grp_irq_configure() - configure the group IRQs for an exec context
 * @scn: HIF context
 * @hif_exec: execution context whose IRQs are configured
 *
 * Return: 0 on success (non-zero error otherwise — confirm in implementation)
 */
int hif_grp_irq_configure(struct hif_softc *scn,
		struct hif_exec_context *hif_exec);

/**
 * hif_grp_irq_deconfigure() - undo hif_grp_irq_configure() for all contexts
 * @scn: HIF context
 *
 * Return: none
 */
void hif_grp_irq_deconfigure(struct hif_softc *scn);

/**
 * hif_ext_group_interrupt_handler() - ISR for an external interrupt group
 * @irq: irq number that fired
 * @context: opaque pointer registered with the IRQ
 *
 * Return: IRQ_HANDLED / IRQ_NONE per Linux irqreturn_t convention
 */
irqreturn_t hif_ext_group_interrupt_handler(int irq, void *context);

/**
 * hif_exec_get_ctx() - look up an execution context by id
 * @hif: opaque HIF handle
 * @id: context id (presumably matches grp_id — confirm in implementation)
 *
 * Return: the matching context, or NULL if not found
 */
struct hif_exec_context *hif_exec_get_ctx(struct hif_opaque_softc *hif,
					  uint8_t id);

/**
 * hif_exec_kill() - stop all execution contexts of a HIF instance
 * @scn: opaque HIF handle
 *
 * Return: none
 */
void hif_exec_kill(struct hif_opaque_softc *scn);
168
169 #if defined(HIF_CPU_PERF_AFFINE_MASK) || defined(FEATURE_IRQ_AFFINITY)
170 /**
171 * hif_pci_irq_set_affinity_hint() - API to set IRQ affinity
172 * @hif_ext_group: hif_ext_group to extract the irq info
173 * @perf: affine to perf cluster or non-perf cluster
174 *
175 * This function will set the IRQ affinity to gold cores
176 * or silver cores based on perf flag
177 *
178 * Return: none
179 */
180 void hif_pci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
181 bool perf);
182 #else
/* stub: IRQ affinity hinting is a no-op when neither
 * HIF_CPU_PERF_AFFINE_MASK nor FEATURE_IRQ_AFFINITY is built
 */
static inline
void hif_pci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
				   bool perf)
{
}
188 #endif
189
190 #ifdef HIF_CPU_PERF_AFFINE_MASK
191
192 /**
193 * hif_pci_ce_irq_set_affinity_hint() - API to set IRQ affinity
194 * @scn: hif_softc to extract the CE irq info
195 *
196 * This function will set the CE IRQ affinity to the gold cores
197 * only for defconfig builds
198 *
199 * Return: none
200 */
201 void hif_pci_ce_irq_set_affinity_hint(
202 struct hif_softc *scn);
203
/**
 * hif_ce_irq_remove_affinity_hint() - remove affinity for the irq
 * @irq: irq number to remove affinity from
 *
 * Delegates to hif_irq_affinity_remove().
 *
 * Return: none
 */
static inline void hif_ce_irq_remove_affinity_hint(int irq)
{
	hif_irq_affinity_remove(irq);
}
212 #else
213
hif_pci_ce_irq_set_affinity_hint(struct hif_softc * scn)214 static inline void hif_pci_ce_irq_set_affinity_hint(
215 struct hif_softc *scn)
216 {
217 }
218
hif_ce_irq_remove_affinity_hint(int irq)219 static inline void hif_ce_irq_remove_affinity_hint(int irq)
220 {
221 }
222 #endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
223
224 #ifdef HIF_CPU_CLEAR_AFFINITY
225 /**
226 * hif_pci_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
227 * @scn: HIF handle
228 * @intr_ctxt_id: interrupt group index
229 * @cpu: CPU core to clear
230 *
231 * Return: None
232 */
233 void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
234 int intr_ctxt_id, int cpu);
235 #else
/* stub: clearing IRQ CPU affinity is a no-op without HIF_CPU_CLEAR_AFFINITY */
static inline
void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
					   int intr_ctxt_id, int cpu)
{
}
241 #endif /* HIF_CPU_CLEAR_AFFINITY */
242
243 #endif
244
245