1 /*
2 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
3 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for
6 * any purpose with or without fee is hereby granted, provided that the
7 * above copyright notice and this permission notice appear in all
8 * copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
11 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
12 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
13 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
14 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
15 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
16 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
17 * PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 #ifndef _HIF_H_
21 #define _HIF_H_
22
23 #ifdef __cplusplus
24 extern "C" {
25 #endif /* __cplusplus */
26
27 /* Header files */
28 #include <qdf_status.h>
29 #include "qdf_ipa.h"
30 #include "qdf_nbuf.h"
31 #include "qdf_lro.h"
32 #include "ol_if_athvar.h"
33 #include <linux/platform_device.h>
34 #ifdef HIF_PCI
35 #include <linux/pci.h>
36 #endif /* HIF_PCI */
37 #ifdef HIF_USB
38 #include <linux/usb.h>
39 #endif /* HIF_USB */
40 #ifdef IPA_OFFLOAD
41 #include <linux/ipa.h>
42 #endif
43 #include "cfg_ucfg_api.h"
44 #include "qdf_dev.h"
45 #include <wlan_init_cfg.h>
46
47 #define ENABLE_MBOX_DUMMY_SPACE_FEATURE 1
48
49 typedef void __iomem *A_target_id_t;
50 typedef void *hif_handle_t;
51
52 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
53 #define HIF_WORK_DRAIN_WAIT_CNT 50
54
55 #define HIF_EP_WAKE_RESET_WAIT_CNT 10
56 #endif
57
58 #define HIF_TYPE_AR6002 2
59 #define HIF_TYPE_AR6003 3
60 #define HIF_TYPE_AR6004 5
61 #define HIF_TYPE_AR9888 6
62 #define HIF_TYPE_AR6320 7
63 #define HIF_TYPE_AR6320V2 8
64 /* For attaching Peregrine 2.0 board host_reg_tbl only */
65 #define HIF_TYPE_AR9888V2 9
66 #define HIF_TYPE_ADRASTEA 10
67 #define HIF_TYPE_AR900B 11
68 #define HIF_TYPE_QCA9984 12
69 #define HIF_TYPE_QCA9888 14
70 #define HIF_TYPE_QCA8074 15
71 #define HIF_TYPE_QCA6290 16
72 #define HIF_TYPE_QCN7605 17
73 #define HIF_TYPE_QCA6390 18
74 #define HIF_TYPE_QCA8074V2 19
75 #define HIF_TYPE_QCA6018 20
76 #define HIF_TYPE_QCN9000 21
77 #define HIF_TYPE_QCA6490 22
78 #define HIF_TYPE_QCA6750 23
79 #define HIF_TYPE_QCA5018 24
80 #define HIF_TYPE_QCN6122 25
81 #define HIF_TYPE_KIWI 26
82 #define HIF_TYPE_QCN9224 27
83 #define HIF_TYPE_QCA9574 28
84 #define HIF_TYPE_MANGO 29
85 #define HIF_TYPE_QCA5332 30
86 #define HIF_TYPE_QCN9160 31
87 #define HIF_TYPE_PEACH 32
88 #define HIF_TYPE_WCN6450 33
89 #define HIF_TYPE_QCN6432 34
90
91 #define DMA_COHERENT_MASK_DEFAULT 37
92
93 #ifdef IPA_OFFLOAD
94 #define DMA_COHERENT_MASK_BELOW_IPA_VER_3 32
95 #endif
96
/**
 * enum hif_ic_irq - enum defining integrated chip irq numbers
 * defining irq numbers that can be used by external modules like datapath
 */
enum hif_ic_irq {
	/* values are consecutive from the explicit anchor at 16 */
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	/* host -> rxdma monitor rings */
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	/* reo -> host destination rings */
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	/* rxdma -> host monitor destination, per mac */
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	/* rxdma -> host monitor status rings, per mac */
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	/* host -> rxdma buffer replenish rings, per mac */
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	/* host -> tcl (tx classifier) input rings */
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	/* wbm -> host tx completion rings */
	wbm2host_tx_completions_ring4,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
	/* tx monitor destination rings, per mac */
	txmon2host_monitor_destination_mac3,
	txmon2host_monitor_destination_mac2,
	txmon2host_monitor_destination_mac1,
	host2tx_monitor_ring1,
};
143
144 #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
/**
 * enum hif_legacy_pci_irq - interrupt sources for legacy (non-MSI-X) PCI
 *
 * Ordinal enum (values start at 0) naming every interrupt source that can
 * be demultiplexed from a legacy PCI interrupt: copy engines, REO, RXDMA,
 * WBM, TCL, tx/rx monitor and misc lines.
 */
enum hif_legacy_pci_irq {
	/* copy engine interrupts */
	ce0,
	ce1,
	ce2,
	ce3,
	ce4,
	ce5,
	ce6,
	ce7,
	ce8,
	ce9,
	ce10,
	ce11,
	ce12,
	ce13,
	ce14,
	ce15,
	/* reo -> sw secondary interrupts */
	reo2sw8_intr2,
	reo2sw7_intr2,
	reo2sw6_intr2,
	reo2sw5_intr2,
	reo2sw4_intr2,
	reo2sw3_intr2,
	reo2sw2_intr2,
	reo2sw1_intr2,
	reo2sw0_intr2,
	/* reo -> sw primary interrupts */
	reo2sw8_intr,
	reo2sw7_intr,
	/* NOTE(review): "inrr" is a typo for "intr"; name kept unchanged
	 * because renaming would break existing users of this enumerator.
	 */
	reo2sw6_inrr,
	reo2sw5_intr,
	reo2sw4_intr,
	reo2sw3_intr,
	reo2sw2_intr,
	reo2sw1_intr,
	reo2sw0_intr,
	reo2status_intr2,
	reo_status,
	reo2rxdma_out_2,
	reo2rxdma_out_1,
	reo_cmd,
	sw2reo6,
	sw2reo5,
	sw2reo1,
	sw2reo,
	/* rxdma -> reo MLO destination rings */
	rxdma2reo_mlo_0_dst_ring1,
	rxdma2reo_mlo_0_dst_ring0,
	rxdma2reo_mlo_1_dst_ring1,
	rxdma2reo_mlo_1_dst_ring0,
	rxdma2reo_dst_ring1,
	rxdma2reo_dst_ring0,
	rxdma2sw_dst_ring1,
	rxdma2sw_dst_ring0,
	rxdma2release_dst_ring1,
	rxdma2release_dst_ring0,
	sw2rxdma_2_src_ring,
	sw2rxdma_1_src_ring,
	sw2rxdma_0,
	/* wbm -> sw release rings (secondary, then primary) */
	wbm2sw6_release2,
	wbm2sw5_release2,
	wbm2sw4_release2,
	wbm2sw3_release2,
	wbm2sw2_release2,
	wbm2sw1_release2,
	wbm2sw0_release2,
	wbm2sw6_release,
	wbm2sw5_release,
	wbm2sw4_release,
	wbm2sw3_release,
	wbm2sw2_release,
	wbm2sw1_release,
	wbm2sw0_release,
	wbm2sw_link,
	wbm_error_release,
	/* tx/rx monitor rings */
	sw2txmon_src_ring,
	sw2rxmon_src_ring,
	txmon2sw_p1_intr1,
	txmon2sw_p1_intr0,
	txmon2sw_p0_dest1,
	txmon2sw_p0_dest0,
	rxmon2sw_p1_intr1,
	rxmon2sw_p1_intr0,
	rxmon2sw_p0_dest1,
	rxmon2sw_p0_dest0,
	sw_release,
	/* sw -> tcl input rings and credit rings */
	sw2tcl_credit2,
	sw2tcl_credit,
	sw2tcl4,
	sw2tcl5,
	sw2tcl3,
	sw2tcl2,
	sw2tcl1,
	sw2wbm1,
	/* miscellaneous interrupt lines */
	misc_8,
	misc_7,
	misc_6,
	misc_5,
	misc_4,
	misc_3,
	misc_2,
	misc_1,
	misc_0,
};
247 #endif
248
249 struct CE_state;
250 #ifdef QCA_WIFI_QCN9224
251 #define CE_COUNT_MAX 16
252 #else
253 #define CE_COUNT_MAX 12
254 #endif
255
256 #ifndef HIF_MAX_GROUP
257 #define HIF_MAX_GROUP WLAN_CFG_INT_NUM_CONTEXTS
258 #endif
259
260 #ifdef CONFIG_BERYLLIUM
261 #define HIF_MAX_GRP_IRQ 25
262 #else
263 #define HIF_MAX_GRP_IRQ 16
264 #endif
265
266 #ifndef NAPI_YIELD_BUDGET_BASED
267 #ifndef QCA_NAPI_DEF_SCALE_BIN_SHIFT
268 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 4
269 #endif
270 #else /* NAPI_YIELD_BUDGET_BASED */
271 #define QCA_NAPI_DEF_SCALE_BIN_SHIFT 2
272 #endif /* NAPI_YIELD_BUDGET_BASED */
273
274 #define QCA_NAPI_BUDGET 64
275 #define QCA_NAPI_DEF_SCALE \
276 (1 << QCA_NAPI_DEF_SCALE_BIN_SHIFT)
277
278 #define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
279 /* NOTE: "napi->scale" can be changed,
280 * but this does not change the number of buckets
281 */
282 #define QCA_NAPI_NUM_BUCKETS 4
283
284 /**
285 * struct qca_napi_stat - stats structure for execution contexts
286 * @napi_schedules: number of times the schedule function is called
287 * @napi_polls: number of times the execution context runs
288 * @napi_completes: number of times that the generating interrupt is re-enabled
289 * @napi_workdone: cumulative of all work done reported by handler
290 * @cpu_corrected: incremented when execution context runs on a different core
291 * than the one that its irq is affined to.
292 * @napi_budget_uses: histogram of work done per execution run
293 * @time_limit_reached: count of yields due to time limit thresholds
294 * @rxpkt_thresh_reached: count of yields due to a work limit
295 * @napi_max_poll_time:
296 * @poll_time_buckets: histogram of poll times for the napi
297 *
298 */
struct qca_napi_stat {
	uint32_t napi_schedules;
	uint32_t napi_polls;
	uint32_t napi_completes;
	uint32_t napi_workdone;
	uint32_t cpu_corrected;
	/* histogram of work done per poll, QCA_NAPI_NUM_BUCKETS buckets */
	uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
	uint32_t time_limit_reached;
	uint32_t rxpkt_thresh_reached;
	unsigned long long napi_max_poll_time;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	/* histogram of poll service times; only with softirq time limiting */
	uint32_t poll_time_buckets[QCA_NAPI_NUM_BUCKETS];
#endif
};
313
314 /*Number of buckets for latency*/
315 #define HIF_SCHED_LATENCY_BUCKETS 8
316
317 /*Buckets for latency between 0 to 2 ms*/
318 #define HIF_SCHED_LATENCY_BUCKET_0_2 2
319 /*Buckets for latency between 3 to 10 ms*/
320 #define HIF_SCHED_LATENCY_BUCKET_3_10 10
321 /*Buckets for latency between 11 to 20 ms*/
322 #define HIF_SCHED_LATENCY_BUCKET_11_20 20
323 /*Buckets for latency between 21 to 50 ms*/
324 #define HIF_SCHED_LATENCY_BUCKET_21_50 50
/* Bucket for latency between 51 and 100 ms */
326 #define HIF_SCHED_LATENCY_BUCKET_51_100 100
/* Bucket for latency between 101 and 250 ms */
328 #define HIF_SCHED_LATENCY_BUCKET_101_250 250
/* Bucket for latency between 251 and 500 ms */
330 #define HIF_SCHED_LATENCY_BUCKET_251_500 500
331
/**
 * struct qca_napi_info - per NAPI instance data structure
 * @netdev: dummy net_dev
 * @hif_ctx: opaque HIF context this instance belongs to
 * @napi: kernel napi_struct for this instance
 * @scale: budget scale factor (currently same on all instances)
 * @id: instance identifier
 * @cpu: CPU associated with this instance
 * @irq: IRQ number serviced by this instance
 * @cpumask: CPU mask used for this instance's irq affinity
 * @stats: per-CPU statistics for this instance
 * @offld_flush_cb: rx offload flush callback (data rx CEs only)
 * @rx_thread_napi: napi instance used by the rx thread
 * @rx_thread_netdev: dummy net_dev backing @rx_thread_napi
 * @lro_ctx: LRO (large receive offload) context
 * @poll_start_time: napi poll service start time
 * @sched_latency_stats: napi schedule latency stats
 * @tstamp: napi schedule start timestamp
 *
 * This data structure holds stuff per NAPI instance.
 * Note that, in the current implementation, though scale is
 * an instance variable, it is set to the same value for all
 * instances.
 */
struct qca_napi_info {
	struct net_device netdev; /* dummy net_dev */
	void *hif_ctx;
	struct napi_struct napi;
	uint8_t scale; /* currently same on all instances */
	uint8_t id;
	uint8_t cpu;
	int irq;
	cpumask_t cpumask;
	/* statistics are kept per CPU (indexed by the core a poll ran on) */
	struct qca_napi_stat stats[NR_CPUS];
#ifdef RECEIVE_OFFLOAD
	/* will only be present for data rx CE's */
	void (*offld_flush_cb)(void *);
	struct napi_struct rx_thread_napi;
	struct net_device rx_thread_netdev;
#endif /* RECEIVE_OFFLOAD */
	qdf_lro_ctx_t lro_ctx;
#ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
	unsigned long long poll_start_time; /* napi poll service start time */
#endif
#ifdef HIF_LATENCY_PROFILE_ENABLE
	/* schedule latency histogram; see HIF_SCHED_LATENCY_BUCKET_* */
	uint64_t sched_latency_stats[HIF_SCHED_LATENCY_BUCKETS];
	uint64_t tstamp; /* napi schedule start timestamp */
#endif
};
381
/* Throughput state used for irq affinity & clock voting decisions */
enum qca_napi_tput_state {
	QCA_NAPI_TPUT_UNINITIALIZED,
	QCA_NAPI_TPUT_LO,
	QCA_NAPI_TPUT_HI
};
/* Hotplug state of a CPU as tracked in the napi cpu table */
enum qca_napi_cpu_state {
	QCA_NAPI_CPU_UNINITIALIZED,
	QCA_NAPI_CPU_DOWN,
	QCA_NAPI_CPU_UP };
391
392 /**
393 * struct qca_napi_cpu - an entry of the napi cpu table
394 * @state:
395 * @core_id: physical core id of the core
396 * @cluster_id: cluster this core belongs to
397 * @core_mask: mask to match all core of this cluster
398 * @thread_mask: mask for this core within the cluster
399 * @max_freq: maximum clock this core can be clocked at
400 * same for all cpus of the same core.
401 * @napis: bitmap of napi instances on this core
402 * @execs: bitmap of execution contexts on this core
403 * @cluster_nxt: chain to link cores within the same cluster
404 *
405 * This structure represents a single entry in the napi cpu
406 * table. The table is part of struct qca_napi_data.
407 * This table is initialized by the init function, called while
408 * the first napi instance is being created, updated by hotplug
409 * notifier and when cpu affinity decisions are made (by throughput
410 * detection), and deleted when the last napi instance is removed.
411 */
struct qca_napi_cpu {
	enum qca_napi_cpu_state state;
	int core_id;
	int cluster_id;
	cpumask_t core_mask;
	cpumask_t thread_mask;
	unsigned int max_freq;
	uint32_t napis;  /* bitmap: one bit per napi instance on this core */
	uint32_t execs;  /* bitmap: one bit per exec context on this core */
	int cluster_nxt; /* index, not pointer */
};
423
/**
 * struct qca_napi_data - collection of napi data for a single hif context
 * @hif_softc: pointer to the hif context
 * @lock: spinlock used in the event state machine
 * @state: state variable used in the napi state machine
 * @ce_map: bit map indicating which ce's have napis running
 * @exec_map: bit map of instantiated exec contexts
 * @user_cpu_affin_mask: CPU affinity mask from INI config.
 * @napis: per copy-engine NAPI instance pointers, indexed by CE id
 * @napi_cpu: cpu info for irq affinity
 * @lilcl_head: index of the head of the little-cluster cpu chain
 * @bigcl_head: index of the head of the big-cluster cpu chain
 * @napi_mode: irq affinity & clock voting mode
 * @cpuhp_handler: CPU hotplug event registration handle
 * @flags: feature flags
 */
struct qca_napi_data {
	struct hif_softc *hif_softc;
	qdf_spinlock_t lock;
	uint32_t state;

	/* bitmap of created/registered NAPI instances, indexed by pipe_id,
	 * not used by clients (clients use an id returned by create)
	 */
	uint32_t ce_map;
	uint32_t exec_map;
	uint32_t user_cpu_affin_mask;
	struct qca_napi_info *napis[CE_COUNT_MAX]; /* indexed by CE id */
	struct qca_napi_cpu napi_cpu[NR_CPUS];
	/* chain heads (indices into napi_cpu, linked via cluster_nxt) */
	int lilcl_head, bigcl_head;
	enum qca_napi_tput_state napi_mode;
	struct qdf_cpuhp_handler *cpuhp_handler;
	uint8_t flags;
};
458
459 /**
460 * struct hif_config_info - Place Holder for HIF configuration
461 * @enable_self_recovery: Self Recovery
462 * @enable_runtime_pm: Enable Runtime PM
463 * @runtime_pm_delay: Runtime PM Delay
464 * @rx_softirq_max_yield_duration_ns: Max Yield time duration for RX Softirq
465 * @enable_ce_dp_irq_affine: Enable affinity for CE DP IRQs
466 *
467 * Structure for holding HIF ini parameters.
468 */
struct hif_config_info {
	bool enable_self_recovery;
#ifdef FEATURE_RUNTIME_PM
	/* uint8_t rather than bool — presumably a mode, not an on/off
	 * flag; TODO confirm against the INI definition.
	 */
	uint8_t enable_runtime_pm;
	u_int32_t runtime_pm_delay;
#endif
	uint64_t rx_softirq_max_yield_duration_ns;
#ifdef FEATURE_ENABLE_CE_DP_IRQ_AFFINE
	bool enable_ce_dp_irq_affine;
#endif
};
480
481 /**
482 * struct hif_target_info - Target Information
483 * @target_version: Target Version
484 * @target_type: Target Type
485 * @target_revision: Target Revision
486 * @soc_version: SOC Version
487 * @hw_name: pointer to hardware name
488 *
489 * Structure to hold target information.
490 */
struct hif_target_info {
	uint32_t target_version;
	uint32_t target_type;
	uint32_t target_revision;
	uint32_t soc_version;
	char *hw_name; /* human-readable hardware name */
};
498
/* Empty tag used as an opaque handle for HIF clients; the concrete state
 * presumably lives in the bus-private struct hif_softc (see
 * qca_napi_data) — clients never dereference this type.
 */
struct hif_opaque_softc {
};
501
502 /**
503 * struct hif_ce_ring_info - CE ring information
504 * @ring_id: ring id
505 * @ring_dir: ring direction
506 * @num_entries: number of entries in ring
507 * @entry_size: ring entry size
508 * @ring_base_paddr: srng base physical address
509 * @hp_paddr: head pointer physical address
510 * @tp_paddr: tail pointer physical address
511 */
struct hif_ce_ring_info {
	uint8_t ring_id;
	uint8_t ring_dir;
	uint32_t num_entries;
	uint32_t entry_size;
	/* physical addresses (64-bit, independent of host pointer width) */
	uint64_t ring_base_paddr;
	uint64_t hp_paddr;
	uint64_t tp_paddr;
};
521
522 /**
523 * struct hif_direct_link_ce_info - Direct Link CE information
 * @ce_id: CE id
525 * @pipe_dir: Pipe direction
526 * @ring_info: ring information
527 */
struct hif_direct_link_ce_info {
	uint8_t ce_id;
	uint8_t pipe_dir;
	struct hif_ce_ring_info ring_info;
};
533
534 /**
535 * enum hif_event_type - Type of DP events to be recorded
536 * @HIF_EVENT_IRQ_TRIGGER: IRQ trigger event
537 * @HIF_EVENT_TIMER_ENTRY: Monitor Timer entry event
538 * @HIF_EVENT_TIMER_EXIT: Monitor Timer exit event
539 * @HIF_EVENT_BH_SCHED: NAPI POLL scheduled event
540 * @HIF_EVENT_SRNG_ACCESS_START: hal ring access start event
541 * @HIF_EVENT_SRNG_ACCESS_END: hal ring access end event
542 * @HIF_EVENT_BH_COMPLETE: NAPI POLL completion event
543 * @HIF_EVENT_BH_FORCE_BREAK: NAPI POLL force break event
544 * @HIF_EVENT_IRQ_DISABLE_EXPIRED: IRQ disable expired event
545 */
enum hif_event_type {
	HIF_EVENT_IRQ_TRIGGER,
	HIF_EVENT_TIMER_ENTRY,
	HIF_EVENT_TIMER_EXIT,
	HIF_EVENT_BH_SCHED,
	HIF_EVENT_SRNG_ACCESS_START,
	HIF_EVENT_SRNG_ACCESS_END,
	HIF_EVENT_BH_COMPLETE,
	HIF_EVENT_BH_FORCE_BREAK,
	HIF_EVENT_IRQ_DISABLE_EXPIRED,
	/* Do check hif_hist_skip_event_record when adding new events */
};
558
559 /**
560 * enum hif_system_pm_state - System PM state
561 * @HIF_SYSTEM_PM_STATE_ON: System in active state
562 * @HIF_SYSTEM_PM_STATE_BUS_RESUMING: bus resume in progress as part of
563 * system resume
564 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDING: bus suspend in progress as part of
565 * system suspend
566 * @HIF_SYSTEM_PM_STATE_BUS_SUSPENDED: bus suspended as part of system suspend
567 */
enum hif_system_pm_state {
	HIF_SYSTEM_PM_STATE_ON,
	HIF_SYSTEM_PM_STATE_BUS_RESUMING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDING,
	HIF_SYSTEM_PM_STATE_BUS_SUSPENDED,
};
574
575 #ifdef WLAN_FEATURE_DP_EVENT_HISTORY
576 #define HIF_NUM_INT_CONTEXTS HIF_MAX_GROUP
577
578 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
579 /* HIF_EVENT_HIST_MAX should always be power of 2 */
580 #define HIF_EVENT_HIST_MAX 512
581
582 #define HIF_EVENT_HIST_ENABLE_MASK 0xFF
583
/* Timestamp source for DP event history entries in debug builds
 * (HIF_CONFIG_SLUB_DEBUG_ON / HIF_CE_DEBUG_DATA_BUF): QDF log timestamp.
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_get_log_timestamp();
}
588
589 #else
590
591 #define HIF_EVENT_HIST_MAX 32
592 /* Enable IRQ TRIGGER, NAPI SCHEDULE, SRNG ACCESS START */
593 #define HIF_EVENT_HIST_ENABLE_MASK 0x19
594
/* Timestamp source for DP event history entries in non-debug builds:
 * scheduler clock.
 */
static inline uint64_t hif_get_log_timestamp(void)
{
	return qdf_sched_clock();
}
599
600 #endif
601
602 /**
603 * struct hif_event_record - an entry of the DP event history
604 * @hal_ring_id: ring id for which event is recorded
605 * @hp: head pointer of the ring (may not be applicable for all events)
606 * @tp: tail pointer of the ring (may not be applicable for all events)
607 * @cpu_id: cpu id on which the event occurred
608 * @timestamp: timestamp when event occurred
609 * @type: type of the event
610 *
611 * This structure represents the information stored for every datapath
612 * event which is logged in the history.
613 */
struct hif_event_record {
	uint8_t hal_ring_id;
	uint32_t hp;
	uint32_t tp;
	int cpu_id;
	uint64_t timestamp;
	enum hif_event_type type;
};
622
623 /**
624 * struct hif_event_misc - history related misc info
625 * @last_irq_index: last irq event index in history
626 * @last_irq_ts: last irq timestamp
627 */
struct hif_event_misc {
	/* signed so it can hold a "no irq recorded yet" sentinel —
	 * TODO confirm against hif_hist_record_event()
	 */
	int32_t last_irq_index;
	uint64_t last_irq_ts;
};
632
633 #ifdef WLAN_FEATURE_AFFINITY_MGR
634 /**
635 * struct hif_cpu_affinity - CPU affinity mask info for IRQ
636 *
637 * @current_irq_mask: Current CPU mask set for IRQ
638 * @wlan_requested_mask: CPU mask requested by WLAN
639 * @walt_taken_mask: Current CPU taken by Audio
640 * @last_updated: Last time IRQ CPU affinity was updated
641 * @last_affined_away: Last time when IRQ was affined away
642 * @update_requested: IRQ affinity hint set requested by WLAN
643 * @irq: IRQ number
644 */
struct hif_cpu_affinity {
	qdf_cpu_mask current_irq_mask;
	qdf_cpu_mask wlan_requested_mask;
	qdf_cpu_mask walt_taken_mask;
	uint64_t last_updated;
	uint64_t last_affined_away;
	bool update_requested;
	int irq;
};
654 #endif
655
656 /**
657 * struct hif_event_history - history for one interrupt group
658 * @index: index to store new event
659 * @misc: event misc information
660 * @event: event entry
661 *
662 * This structure represents the datapath history for one
663 * interrupt group.
664 */
struct hif_event_history {
	/* atomic write index; HIF_EVENT_HIST_MAX is a power of 2 so the
	 * index can be wrapped with a mask
	 */
	qdf_atomic_t index;
	struct hif_event_misc misc;
	struct hif_event_record event[HIF_EVENT_HIST_MAX];
};
670
671 /**
672 * hif_desc_history_log_register() - Register hif_event_desc_history buffers
673 *
674 * Return: None
675 */
676 void hif_desc_history_log_register(void);
677
678 /**
679 * hif_desc_history_log_unregister() - Unregister hif_event_desc_history
680 *
681 * Return: None
682 */
683 void hif_desc_history_log_unregister(void);
684
685 /**
686 * hif_hist_record_event() - Record one datapath event in history
687 * @hif_ctx: HIF opaque context
688 * @event: DP event entry
689 * @intr_grp_id: interrupt group ID registered with hif
690 *
691 * Return: None
692 */
693 void hif_hist_record_event(struct hif_opaque_softc *hif_ctx,
694 struct hif_event_record *event,
695 uint8_t intr_grp_id);
696
697 /**
698 * hif_event_history_init() - Initialize SRNG event history buffers
699 * @hif_ctx: HIF opaque context
700 * @id: context group ID for which history is recorded
701 *
702 * Returns: None
703 */
704 void hif_event_history_init(struct hif_opaque_softc *hif_ctx, uint8_t id);
705
706 /**
707 * hif_event_history_deinit() - De-initialize SRNG event history buffers
708 * @hif_ctx: HIF opaque context
709 * @id: context group ID for which history is recorded
710 *
711 * Returns: None
712 */
713 void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx, uint8_t id);
714
715 /**
716 * hif_record_event() - Wrapper function to form and record DP event
717 * @hif_ctx: HIF opaque context
718 * @intr_grp_id: interrupt group ID registered with hif
719 * @hal_ring_id: ring id for which event is recorded
720 * @hp: head pointer index of the srng
721 * @tp: tail pointer index of the srng
722 * @type: type of the event to be logged in history
723 *
724 * Return: None
725 */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
	/* Zero-initialize: cpu_id and timestamp are not set here —
	 * presumably filled in by hif_hist_record_event(); TODO confirm.
	 * Zeroing guarantees no indeterminate bytes are passed down.
	 */
	struct hif_event_record event = {0};

	event.hal_ring_id = hal_ring_id;
	event.hp = hp;
	event.tp = tp;
	event.type = type;

	hif_hist_record_event(hif_ctx, &event, intr_grp_id);
}
744
745 #else
static inline void hif_desc_history_log_register(void)
{
	/* no-op: WLAN_FEATURE_DP_EVENT_HISTORY disabled */
}
749
static inline void hif_desc_history_log_unregister(void)
{
	/* no-op: WLAN_FEATURE_DP_EVENT_HISTORY disabled */
}
753
/* Stub: event history compiled out; the event is silently dropped */
static inline void hif_record_event(struct hif_opaque_softc *hif_ctx,
				    uint8_t intr_grp_id,
				    uint8_t hal_ring_id,
				    uint32_t hp,
				    uint32_t tp,
				    enum hif_event_type type)
{
}
762
/* Stub: nothing to initialize when event history is compiled out */
static inline void hif_event_history_init(struct hif_opaque_softc *hif_ctx,
					  uint8_t id)
{
}
767
/* Stub: nothing to tear down when event history is compiled out */
static inline void hif_event_history_deinit(struct hif_opaque_softc *hif_ctx,
					    uint8_t id)
{
}
772 #endif /* WLAN_FEATURE_DP_EVENT_HISTORY */
773
774 void hif_display_ctrl_traffic_pipes_state(struct hif_opaque_softc *hif_ctx);
775
776 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
777 void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx);
778 #else
/* Stub: descriptor history only exists with SLUB debug / CE debug data buf */
static
inline void hif_display_latest_desc_hist(struct hif_opaque_softc *hif_ctx) {}
781 #endif
782
783 /**
784 * enum HIF_DEVICE_POWER_CHANGE_TYPE: Device Power change type
785 *
786 * @HIF_DEVICE_POWER_UP: HIF layer should power up interface and/or module
787 * @HIF_DEVICE_POWER_DOWN: HIF layer should initiate bus-specific measures to
788 * minimize power
789 * @HIF_DEVICE_POWER_CUT: HIF layer should initiate bus-specific AND/OR
790 * platform-specific measures to completely power-off
791 * the module and associated hardware (i.e. cut power
792 * supplies)
793 */
enum HIF_DEVICE_POWER_CHANGE_TYPE {
	HIF_DEVICE_POWER_UP,
	HIF_DEVICE_POWER_DOWN,
	HIF_DEVICE_POWER_CUT
};
799
800 /**
801 * enum hif_enable_type: what triggered the enabling of hif
802 *
803 * @HIF_ENABLE_TYPE_PROBE: probe triggered enable
804 * @HIF_ENABLE_TYPE_REINIT: reinit triggered enable
805 * @HIF_ENABLE_TYPE_MAX: Max value
806 */
enum hif_enable_type {
	HIF_ENABLE_TYPE_PROBE,
	HIF_ENABLE_TYPE_REINIT,
	HIF_ENABLE_TYPE_MAX
};
812
813 /**
814 * enum hif_disable_type: what triggered the disabling of hif
815 *
816 * @HIF_DISABLE_TYPE_PROBE_ERROR: probe error triggered disable
817 * @HIF_DISABLE_TYPE_REINIT_ERROR: reinit error triggered disable
818 * @HIF_DISABLE_TYPE_REMOVE: remove triggered disable
819 * @HIF_DISABLE_TYPE_SHUTDOWN: shutdown triggered disable
820 * @HIF_DISABLE_TYPE_MAX: Max value
821 */
enum hif_disable_type {
	HIF_DISABLE_TYPE_PROBE_ERROR,
	HIF_DISABLE_TYPE_REINIT_ERROR,
	HIF_DISABLE_TYPE_REMOVE,
	HIF_DISABLE_TYPE_SHUTDOWN,
	HIF_DISABLE_TYPE_MAX
};
829
830 /**
831 * enum hif_device_config_opcode: configure mode
832 *
833 * @HIF_DEVICE_POWER_STATE: device power state
834 * @HIF_DEVICE_GET_BLOCK_SIZE: get block size
835 * @HIF_DEVICE_GET_FIFO_ADDR: get block address
836 * @HIF_DEVICE_GET_PENDING_EVENTS_FUNC: get pending events functions
837 * @HIF_DEVICE_GET_IRQ_PROC_MODE: get irq proc mode
838 * @HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC: receive event function
839 * @HIF_DEVICE_POWER_STATE_CHANGE: change power state
840 * @HIF_DEVICE_GET_IRQ_YIELD_PARAMS: get yield params
841 * @HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT: configure scatter request
842 * @HIF_DEVICE_GET_OS_DEVICE: get OS device
843 * @HIF_DEVICE_DEBUG_BUS_STATE: debug bus state
844 * @HIF_BMI_DONE: bmi done
845 * @HIF_DEVICE_SET_TARGET_TYPE: set target type
846 * @HIF_DEVICE_SET_HTC_CONTEXT: set htc context
847 * @HIF_DEVICE_GET_HTC_CONTEXT: get htc context
848 */
enum hif_device_config_opcode {
	HIF_DEVICE_POWER_STATE = 0,
	HIF_DEVICE_GET_BLOCK_SIZE,
	HIF_DEVICE_GET_FIFO_ADDR,
	HIF_DEVICE_GET_PENDING_EVENTS_FUNC,
	HIF_DEVICE_GET_IRQ_PROC_MODE,
	HIF_DEVICE_GET_RECV_EVENT_MASK_UNMASK_FUNC,
	HIF_DEVICE_POWER_STATE_CHANGE,
	HIF_DEVICE_GET_IRQ_YIELD_PARAMS,
	HIF_CONFIGURE_QUERY_SCATTER_REQUEST_SUPPORT,
	HIF_DEVICE_GET_OS_DEVICE,
	HIF_DEVICE_DEBUG_BUS_STATE,
	HIF_BMI_DONE,
	HIF_DEVICE_SET_TARGET_TYPE,
	HIF_DEVICE_SET_HTC_CONTEXT,
	HIF_DEVICE_GET_HTC_CONTEXT,
};
866
867 #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
/**
 * struct HID_ACCESS_LOG - one entry of the PCIe register access debug log
 * @seqnum: sequence number of the access
 * @is_write: true for a write access, false for a read
 * @addr: address that was accessed
 * @value: value written (if @is_write) or read back
 */
struct HID_ACCESS_LOG {
	uint32_t seqnum;
	bool is_write;
	void *addr;
	uint32_t value;
};
874 #endif
875
876 void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
877 uint32_t value);
878 uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset);
879
880 #define HIF_MAX_DEVICES 1
881 /**
882 * struct htc_callbacks - Structure for HTC Callbacks methods
883 * @context: context to pass to the @dsr_handler
884 * note : @rw_compl_handler is provided the context
885 * passed to hif_read_write
886 * @rw_compl_handler: Read / write completion handler
887 * @dsr_handler: DSR Handler
888 */
struct htc_callbacks {
	void *context;
	QDF_STATUS(*rw_compl_handler)(void *rw_ctx, QDF_STATUS status);
	QDF_STATUS(*dsr_handler)(void *context);
};
894
895 /**
896 * struct hif_driver_state_callbacks - Callbacks for HIF to query Driver state
897 * @context: Private data context
898 * @set_recovery_in_progress: To Set Driver state for recovery in progress
899 * @is_recovery_in_progress: Query if driver state is recovery in progress
900 * @is_load_unload_in_progress: Query if driver state Load/Unload in Progress
901 * @is_driver_unloading: Query if driver is unloading.
902 * @is_target_ready:
903 * @get_bandwidth_level: Query current bandwidth level for the driver
904 * @prealloc_get_consistent_mem_unaligned: get prealloc unaligned consistent mem
905 * @prealloc_put_consistent_mem_unaligned: put unaligned consistent mem to pool
906 * @prealloc_get_multi_pages: get prealloc multi pages memory
907 * @prealloc_put_multi_pages: put prealloc multi pages memory back to pool
908 * This Structure provides callback pointer for HIF to query hdd for driver
909 * states.
910 */
struct hif_driver_state_callbacks {
	/* context passed back as first argument to every callback below */
	void *context;
	void (*set_recovery_in_progress)(void *context, uint8_t val);
	bool (*is_recovery_in_progress)(void *context);
	bool (*is_load_unload_in_progress)(void *context);
	bool (*is_driver_unloading)(void *context);
	bool (*is_target_ready)(void *context);
	int (*get_bandwidth_level)(void *context);
	void *(*prealloc_get_consistent_mem_unaligned)(qdf_size_t size,
						       qdf_dma_addr_t *paddr,
						       uint32_t ring_type);
	void (*prealloc_put_consistent_mem_unaligned)(void *vaddr);
	void (*prealloc_get_multi_pages)(uint32_t desc_type,
					 qdf_size_t elem_size,
					 uint16_t elem_num,
					 struct qdf_mem_multi_page_t *pages,
					 bool cacheable);
	void (*prealloc_put_multi_pages)(uint32_t desc_type,
					 struct qdf_mem_multi_page_t *pages);
};
931
932 /* This API detaches the HTC layer from the HIF device */
933 void hif_detach_htc(struct hif_opaque_softc *hif_ctx);
934
935 /****************************************************************/
936 /* BMI and Diag window abstraction */
937 /****************************************************************/
938
939 #define HIF_BMI_EXCHANGE_NO_TIMEOUT ((uint32_t)(0))
940
941 #define DIAG_TRANSFER_LIMIT 2048U /* maximum number of bytes that can be
942 * handled atomically by
943 * DiagRead/DiagWrite
944 */
945
946 #ifdef WLAN_FEATURE_BMI
947 /*
948 * API to handle HIF-specific BMI message exchanges, this API is synchronous
949 * and only allowed to be called from a context that can block (sleep)
950 */
951 QDF_STATUS hif_exchange_bmi_msg(struct hif_opaque_softc *hif_ctx,
952 qdf_dma_addr_t cmd, qdf_dma_addr_t rsp,
953 uint8_t *pSendMessage, uint32_t Length,
954 uint8_t *pResponseMessage,
955 uint32_t *pResponseLength, uint32_t TimeoutMS);
956 void hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx);
957 bool hif_needs_bmi(struct hif_opaque_softc *hif_ctx);
958 #else /* WLAN_FEATURE_BMI */
static inline void
hif_register_bmi_callbacks(struct hif_opaque_softc *hif_ctx)
{
	/* no-op: WLAN_FEATURE_BMI disabled */
}
963
static inline bool
hif_needs_bmi(struct hif_opaque_softc *hif_ctx)
{
	/* BMI compiled out, so it is never needed */
	return false;
}
969 #endif /* WLAN_FEATURE_BMI */
970
971 #ifdef HIF_CPU_CLEAR_AFFINITY
972 /**
973 * hif_config_irq_clear_cpu_affinity() - Remove cpu affinity of IRQ
974 * @scn: HIF handle
975 * @intr_ctxt_id: interrupt group index
976 * @cpu: CPU core to clear
977 *
978 * Return: None
979 */
980 void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
981 int intr_ctxt_id, int cpu);
982 #else
static inline
void hif_config_irq_clear_cpu_affinity(struct hif_opaque_softc *scn,
				       int intr_ctxt_id, int cpu)
{
	/* no-op: HIF_CPU_CLEAR_AFFINITY disabled */
}
988 #endif
989
990 /*
991 * APIs to handle HIF specific diagnostic read accesses. These APIs are
992 * synchronous and only allowed to be called from a context that
993 * can block (sleep). They are not high performance APIs.
994 *
995 * hif_diag_read_access reads a 4 Byte aligned/length value from a
996 * Target register or memory word.
997 *
998 * hif_diag_read_mem reads an arbitrary length of arbitrarily aligned memory.
999 */
1000 QDF_STATUS hif_diag_read_access(struct hif_opaque_softc *hif_ctx,
1001 uint32_t address, uint32_t *data);
1002 QDF_STATUS hif_diag_read_mem(struct hif_opaque_softc *hif_ctx, uint32_t address,
1003 uint8_t *data, int nbytes);
1004 void hif_dump_target_memory(struct hif_opaque_softc *hif_ctx,
1005 void *ramdump_base, uint32_t address, uint32_t size);
1006 /*
1007 * APIs to handle HIF specific diagnostic write accesses. These APIs are
1008 * synchronous and only allowed to be called from a context that
1009 * can block (sleep).
1010 * They are not high performance APIs.
1011 *
1012 * hif_diag_write_access writes a 4 Byte aligned/length value to a
1013 * Target register or memory word.
1014 *
1015 * hif_diag_write_mem writes an arbitrary length of arbitrarily aligned memory.
1016 */
1017 QDF_STATUS hif_diag_write_access(struct hif_opaque_softc *hif_ctx,
1018 uint32_t address, uint32_t data);
1019 QDF_STATUS hif_diag_write_mem(struct hif_opaque_softc *hif_ctx,
1020 uint32_t address, uint8_t *data, int nbytes);
1021
1022 typedef void (*fastpath_msg_handler)(void *, qdf_nbuf_t *, uint32_t);
1023
1024 void hif_enable_polled_mode(struct hif_opaque_softc *hif_ctx);
1025 bool hif_is_polled_mode_enabled(struct hif_opaque_softc *hif_ctx);
1026
1027 /*
1028 * Set the FASTPATH_mode_on flag in sc, for use by data path
1029 */
1030 #ifdef WLAN_FEATURE_FASTPATH
1031 void hif_enable_fastpath(struct hif_opaque_softc *hif_ctx);
1032 bool hif_is_fastpath_mode_enabled(struct hif_opaque_softc *hif_ctx);
1033 void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret);
1034
1035 /**
1036 * hif_ce_fastpath_cb_register() - Register callback for fastpath msg handler
1037 * @hif_ctx: HIF opaque context
1038 * @handler: Callback function
1039 * @context: handle for callback function
1040 *
1041 * Return: QDF_STATUS_SUCCESS on success or QDF_STATUS_E_FAILURE
1042 */
1043 QDF_STATUS hif_ce_fastpath_cb_register(
1044 struct hif_opaque_softc *hif_ctx,
1045 fastpath_msg_handler handler, void *context);
1046 #else
/* Stub when WLAN_FEATURE_FASTPATH is disabled: registration always fails */
static inline QDF_STATUS hif_ce_fastpath_cb_register(
	struct hif_opaque_softc *hif_ctx,
	fastpath_msg_handler handler, void *context)
{
	return QDF_STATUS_E_FAILURE;
}
1053
/* Stub when WLAN_FEATURE_FASTPATH is disabled: no CE handle is available */
static inline void *hif_get_ce_handle(struct hif_opaque_softc *hif_ctx, int ret)
{
	return NULL;
}
1058
1059 #endif
1060
1061 /*
1062 * Enable/disable CDC max performance workaround
1063 * For max-performance set this to 0
1064 * To allow SoC to enter sleep set this to 1
1065 */
1066 #define CONFIG_DISABLE_CDC_MAX_PERF_WAR 0
1067
1068 void hif_ipa_get_ce_resource(struct hif_opaque_softc *hif_ctx,
1069 qdf_shared_mem_t **ce_sr,
1070 uint32_t *ce_sr_ring_size,
1071 qdf_dma_addr_t *ce_reg_paddr);
1072
1073 /**
1074 * struct hif_msg_callbacks - List of callbacks - filled in by HTC.
1075 * @Context: context meaningful to HTC
1076 * @txCompletionHandler:
1077 * @rxCompletionHandler:
1078 * @txResourceAvailHandler:
1079 * @fwEventHandler:
1080 * @update_bundle_stats:
1081 */
1082 struct hif_msg_callbacks {
1083 void *Context;
1084 /**< context meaningful to HTC */
1085 QDF_STATUS (*txCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
1086 uint32_t transferID,
1087 uint32_t toeplitz_hash_result);
1088 QDF_STATUS (*rxCompletionHandler)(void *Context, qdf_nbuf_t wbuf,
1089 uint8_t pipeID);
1090 void (*txResourceAvailHandler)(void *context, uint8_t pipe);
1091 void (*fwEventHandler)(void *context, QDF_STATUS status);
1092 void (*update_bundle_stats)(void *context, uint8_t no_of_pkt_in_bundle);
1093 };
1094
/**
 * enum hif_target_status - target state reported to upper layers
 * @TARGET_STATUS_CONNECTED: target connected
 * @TARGET_STATUS_RESET: target got reset
 * @TARGET_STATUS_EJECT: target got ejected
 * @TARGET_STATUS_SUSPEND: target got suspended
 */
enum hif_target_status {
	TARGET_STATUS_CONNECTED = 0, /* target connected */
	TARGET_STATUS_RESET, /* target got reset */
	TARGET_STATUS_EJECT, /* target got ejected */
	TARGET_STATUS_SUSPEND /* target got suspended */
};
1101
1102 /**
1103 * enum hif_attribute_flags: configure hif
1104 *
1105 * @HIF_LOWDESC_CE_CFG: Configure HIF with Low descriptor CE
1106 * @HIF_LOWDESC_CE_NO_PKTLOG_CFG: Configure HIF with Low descriptor
1107 * + No pktlog CE
1108 */
1109 enum hif_attribute_flags {
1110 HIF_LOWDESC_CE_CFG = 1,
1111 HIF_LOWDESC_CE_NO_PKTLOG_CFG
1112 };
1113
1114 #define HIF_DATA_ATTR_SET_TX_CLASSIFY(attr, v) \
1115 (attr |= (v & 0x01) << 5)
1116 #define HIF_DATA_ATTR_SET_ENCAPSULATION_TYPE(attr, v) \
1117 (attr |= (v & 0x03) << 6)
1118 #define HIF_DATA_ATTR_SET_ADDR_X_SEARCH_DISABLE(attr, v) \
1119 (attr |= (v & 0x01) << 13)
1120 #define HIF_DATA_ATTR_SET_ADDR_Y_SEARCH_DISABLE(attr, v) \
1121 (attr |= (v & 0x01) << 14)
1122 #define HIF_DATA_ATTR_SET_TOEPLITZ_HASH_ENABLE(attr, v) \
1123 (attr |= (v & 0x01) << 15)
1124 #define HIF_DATA_ATTR_SET_PACKET_OR_RESULT_OFFSET(attr, v) \
1125 (attr |= (v & 0x0FFF) << 16)
1126 #define HIF_DATA_ATTR_SET_ENABLE_11H(attr, v) \
1127 (attr |= (v & 0x01) << 30)
1128
/**
 * struct hif_ul_pipe_info - state of an upload (UL) copy-engine pipe ring
 * @nentries: number of entries in the ring
 * @nentries_mask: mask applied when wrapping ring indices
 * @sw_index: software (host) ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_ul_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index; /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1138
/**
 * struct hif_dl_pipe_info - state of a download (DL) copy-engine pipe ring
 * @nentries: number of entries in the ring
 * @nentries_mask: mask applied when wrapping ring indices
 * @sw_index: software (host) ring index
 * @write_index: cached copy of the write index
 * @hw_index: cached copy of the hardware index
 * @base_addr_owner_space: ring base address in host address space
 * @base_addr_CE_space: ring base address in CE (device) address space
 */
struct hif_dl_pipe_info {
	unsigned int nentries;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index; /* cached copy */
	unsigned int hw_index; /* cached copy */
	void *base_addr_owner_space; /* Host address space */
	qdf_dma_addr_t base_addr_CE_space; /* CE address space */
};
1148
/**
 * struct hif_pipe_addl_info - additional per-pipe information
 * @pci_mem: PCI memory address (presumably BAR base - confirm with callers)
 * @ctrl_addr: CE control register address/offset for this pipe
 * @ul_pipe: upload pipe ring state
 * @dl_pipe: download pipe ring state
 */
struct hif_pipe_addl_info {
	uint32_t pci_mem;
	uint32_t ctrl_addr;
	struct hif_ul_pipe_info ul_pipe;
	struct hif_dl_pipe_info dl_pipe;
};
1155
1156 #ifdef CONFIG_SLUB_DEBUG_ON
1157 #define MSG_FLUSH_NUM 16
1158 #else /* PERF build */
1159 #define MSG_FLUSH_NUM 32
#endif /* CONFIG_SLUB_DEBUG_ON */
1161
1162 struct hif_bus_id;
1163
1164 #ifdef CUSTOM_CB_SCHEDULER_SUPPORT
1165 /**
1166 * hif_register_ce_custom_cb() - Helper API to register the custom callback
1167 * @hif_ctx: HIF opaque context
1168 * @pipe: Pipe number
1169 * @custom_cb: Custom call back function pointer
1170 * @custom_cb_context: Custom callback context
1171 *
1172 * return: QDF_STATUS
1173 */
1174 QDF_STATUS
1175 hif_register_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe,
1176 void (*custom_cb)(void *), void *custom_cb_context);
1177
1178 /**
1179 * hif_unregister_ce_custom_cb() - Helper API to unregister the custom callback
1180 * @hif_ctx: HIF opaque context
1181 * @pipe: Pipe number
1182 *
1183 * return: QDF_STATUS
1184 */
1185 QDF_STATUS
1186 hif_unregister_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
1187
1188 /**
1189 * hif_enable_ce_custom_cb() - Helper API to enable the custom callback
1190 * @hif_ctx: HIF opaque context
1191 * @pipe: Pipe number
1192 *
1193 * return: QDF_STATUS
1194 */
1195 QDF_STATUS
1196 hif_enable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
1197
1198 /**
1199 * hif_disable_ce_custom_cb() - Helper API to disable the custom callback
1200 * @hif_ctx: HIF opaque context
1201 * @pipe: Pipe number
1202 *
1203 * return: QDF_STATUS
1204 */
1205 QDF_STATUS
1206 hif_disable_ce_custom_cb(struct hif_opaque_softc *hif_ctx, uint8_t pipe);
1207 #endif /* CUSTOM_CB_SCHEDULER_SUPPORT */
1208
1209 void hif_claim_device(struct hif_opaque_softc *hif_ctx);
1210 QDF_STATUS hif_get_config_item(struct hif_opaque_softc *hif_ctx,
1211 int opcode, void *config, uint32_t config_len);
1212 void hif_set_mailbox_swap(struct hif_opaque_softc *hif_ctx);
1213 void hif_mask_interrupt_call(struct hif_opaque_softc *hif_ctx);
1214 void hif_post_init(struct hif_opaque_softc *hif_ctx, void *hHTC,
1215 struct hif_msg_callbacks *callbacks);
1216 QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx);
1217 void hif_stop(struct hif_opaque_softc *hif_ctx);
1218 void hif_flush_surprise_remove(struct hif_opaque_softc *hif_ctx);
1219 void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t CmdId, bool start);
1220 void hif_trigger_dump(struct hif_opaque_softc *hif_ctx,
1221 uint8_t cmd_id, bool start);
1222
1223 QDF_STATUS hif_send_head(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1224 uint32_t transferID, uint32_t nbytes,
1225 qdf_nbuf_t wbuf, uint32_t data_attr);
1226 void hif_send_complete_check(struct hif_opaque_softc *hif_ctx, uint8_t PipeID,
1227 int force);
1228 void hif_schedule_ce_tasklet(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1229 void hif_shut_down_device(struct hif_opaque_softc *hif_ctx);
1230 void hif_get_default_pipe(struct hif_opaque_softc *hif_ctx, uint8_t *ULPipe,
1231 uint8_t *DLPipe);
1232 int hif_map_service_to_pipe(struct hif_opaque_softc *hif_ctx, uint16_t svc_id,
1233 uint8_t *ul_pipe, uint8_t *dl_pipe, int *ul_is_polled,
1234 int *dl_is_polled);
1235 uint16_t
1236 hif_get_free_queue_number(struct hif_opaque_softc *hif_ctx, uint8_t PipeID);
1237 void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx);
1238 uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset);
1239 void hif_set_target_sleep(struct hif_opaque_softc *hif_ctx, bool sleep_ok,
1240 bool wait_for_it);
1241 int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx);
1242 #ifndef HIF_PCI
/* Stub for non-PCI builds: the SoC status check is PCI-specific */
static inline int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
	return 0;
}
1247 #else
1248 int hif_check_soc_status(struct hif_opaque_softc *hif_ctx);
1249 #endif
1250 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
1251 u32 *revision, const char **target_name);
1252
1253 #ifdef RECEIVE_OFFLOAD
1254 /**
1255 * hif_offld_flush_cb_register() - Register the offld flush callback
1256 * @scn: HIF opaque context
1257 * @offld_flush_handler: Flush callback is either ol_flush, incase of rx_thread
1258 * Or GRO/LRO flush when RxThread is not enabled. Called
1259 * with corresponding context for flush.
1260 * Return: None
1261 */
1262 void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
1263 void (offld_flush_handler)(void *ol_ctx));
1264
1265 /**
1266 * hif_offld_flush_cb_deregister() - deRegister the offld flush callback
1267 * @scn: HIF opaque context
1268 *
1269 * Return: None
1270 */
1271 void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn);
1272 #endif
1273
1274 #ifdef WLAN_FEATURE_RX_SOFTIRQ_TIME_LIMIT
1275 /**
1276 * hif_exec_should_yield() - Check if hif napi context should yield
1277 * @hif_ctx: HIF opaque context
1278 * @grp_id: grp_id of the napi for which check needs to be done
1279 *
1280 * The function uses grp_id to look for NAPI and checks if NAPI needs to
1281 * yield. HIF_EXT_GROUP_MAX_YIELD_DURATION_NS is the duration used for
1282 * yield decision.
1283 *
1284 * Return: true if NAPI needs to yield, else false
1285 */
1286 bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx, uint grp_id);
1287 #else
/* Stub when RX softirq time limit is disabled: NAPI never yields early */
static inline bool hif_exec_should_yield(struct hif_opaque_softc *hif_ctx,
					 uint grp_id)
{
	return false;
}
1293 #endif
1294
1295 void hif_disable_isr(struct hif_opaque_softc *hif_ctx);
1296 void hif_reset_soc(struct hif_opaque_softc *hif_ctx);
1297 void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
1298 int htc_htt_tx_endpoint);
1299
1300 /**
1301 * hif_open() - Create hif handle
1302 * @qdf_ctx: qdf context
1303 * @mode: Driver Mode
1304 * @bus_type: Bus Type
1305 * @cbk: CDS Callbacks
1306 * @psoc: psoc object manager
1307 *
1308 * API to open HIF Context
1309 *
1310 * Return: HIF Opaque Pointer
1311 */
1312 struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
1313 uint32_t mode,
1314 enum qdf_bus_type bus_type,
1315 struct hif_driver_state_callbacks *cbk,
1316 struct wlan_objmgr_psoc *psoc);
1317
1318 /**
1319 * hif_init_dma_mask() - Set dma mask for the dev
1320 * @dev: dev for which DMA mask is to be set
1321 * @bus_type: bus type for the target
1322 *
1323 * This API sets the DMA mask for the device. before the datapath
1324 * memory pre-allocation is done. If the DMA mask is not set before
1325 * requesting the DMA memory, kernel defaults to a 32-bit DMA mask,
1326 * and does not utilize the full device capability.
1327 *
1328 * Return: 0 - success, non-zero on failure.
1329 */
1330 int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type);
1331 void hif_close(struct hif_opaque_softc *hif_ctx);
1332 QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
1333 void *bdev, const struct hif_bus_id *bid,
1334 enum qdf_bus_type bus_type,
1335 enum hif_enable_type type);
1336 void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type);
1337 #ifdef CE_TASKLET_DEBUG_ENABLE
1338 void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx,
1339 uint8_t value);
1340 #endif
1341 void hif_display_stats(struct hif_opaque_softc *hif_ctx);
1342 void hif_clear_stats(struct hif_opaque_softc *hif_ctx);
1343
1344 /**
1345 * enum hif_pm_wake_irq_type - Wake interrupt type for Power Management
1346 * @HIF_PM_INVALID_WAKE: Wake irq is invalid or not configured
1347 * @HIF_PM_MSI_WAKE: Wake irq is MSI interrupt
1348 * @HIF_PM_CE_WAKE: Wake irq is CE interrupt
1349 */
1350 typedef enum {
1351 HIF_PM_INVALID_WAKE,
1352 HIF_PM_MSI_WAKE,
1353 HIF_PM_CE_WAKE,
1354 } hif_pm_wake_irq_type;
1355
1356 /**
 * hif_pm_get_wake_irq_type() - Get wake irq type for Power Management
1358 * @hif_ctx: HIF context
1359 *
1360 * Return: enum hif_pm_wake_irq_type
1361 */
1362 hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx);
1363
1364 /**
1365 * enum hif_ep_vote_type - hif ep vote type
1366 * @HIF_EP_VOTE_DP_ACCESS: vote type is specific DP
1367 * @HIF_EP_VOTE_NONDP_ACCESS: ep vote for over all access
1368 */
1369 enum hif_ep_vote_type {
1370 HIF_EP_VOTE_DP_ACCESS,
1371 HIF_EP_VOTE_NONDP_ACCESS
1372 };
1373
1374 /**
1375 * enum hif_ep_vote_access - hif ep vote access
1376 * @HIF_EP_VOTE_ACCESS_ENABLE: Enable ep voting
1377 * @HIF_EP_VOTE_INTERMEDIATE_ACCESS: allow during transition
1378 * @HIF_EP_VOTE_ACCESS_DISABLE: disable ep voting
1379 */
1380 enum hif_ep_vote_access {
1381 HIF_EP_VOTE_ACCESS_ENABLE,
1382 HIF_EP_VOTE_INTERMEDIATE_ACCESS,
1383 HIF_EP_VOTE_ACCESS_DISABLE
1384 };
1385
1386 /**
1387 * enum hif_rtpm_client_id - modules registered with runtime pm module
1388 * @HIF_RTPM_ID_RESERVED: Reserved ID
1389 * @HIF_RTPM_ID_HAL_REO_CMD: HAL REO commands
1390 * @HIF_RTPM_ID_WMI: WMI commands Tx
1391 * @HIF_RTPM_ID_HTT: HTT commands Tx
1392 * @HIF_RTPM_ID_DP: Datapath Tx path
1393 * @HIF_RTPM_ID_DP_RING_STATS: Datapath ring stats
1394 * @HIF_RTPM_ID_CE: CE Tx buffer posting
1395 * @HIF_RTPM_ID_FORCE_WAKE: Force wake request
 * @HIF_RTPM_ID_PM_QOS_NOTIFY: PM QoS notification
 * @HIF_RTPM_ID_WIPHY_SUSPEND: Wiphy suspend
1398 * @HIF_RTPM_ID_MAX: Max id
1399 */
1400 enum hif_rtpm_client_id {
1401 HIF_RTPM_ID_RESERVED,
1402 HIF_RTPM_ID_HAL_REO_CMD,
1403 HIF_RTPM_ID_WMI,
1404 HIF_RTPM_ID_HTT,
1405 HIF_RTPM_ID_DP,
1406 HIF_RTPM_ID_DP_RING_STATS,
1407 HIF_RTPM_ID_CE,
1408 HIF_RTPM_ID_FORCE_WAKE,
1409 HIF_RTPM_ID_PM_QOS_NOTIFY,
1410 HIF_RTPM_ID_WIPHY_SUSPEND,
1411 HIF_RTPM_ID_MAX
1412 };
1413
1414 /**
1415 * enum rpm_type - Get and Put calls types
1416 * @HIF_RTPM_GET_ASYNC: Increment usage count and when system is suspended
1417 * schedule resume process, return depends on pm state.
1418 * @HIF_RTPM_GET_FORCE: Increment usage count and when system is suspended
1419 * schedule resume process, returns success irrespective of
1420 * pm_state.
1421 * @HIF_RTPM_GET_SYNC: Increment usage count and when system is suspended,
1422 * wait till process is resumed.
1423 * @HIF_RTPM_GET_NORESUME: Only increments usage count.
1424 * @HIF_RTPM_PUT_ASYNC: Decrements usage count and puts system in idle state.
1425 * @HIF_RTPM_PUT_SYNC_SUSPEND: Decrements usage count and puts system in
1426 * suspended state.
1427 * @HIF_RTPM_PUT_NOIDLE: Decrements usage count.
1428 */
1429 enum rpm_type {
1430 HIF_RTPM_GET_ASYNC,
1431 HIF_RTPM_GET_FORCE,
1432 HIF_RTPM_GET_SYNC,
1433 HIF_RTPM_GET_NORESUME,
1434 HIF_RTPM_PUT_ASYNC,
1435 HIF_RTPM_PUT_SYNC_SUSPEND,
1436 HIF_RTPM_PUT_NOIDLE,
1437 };
1438
1439 /**
1440 * struct hif_pm_runtime_lock - data structure for preventing runtime suspend
1441 * @list: global list of runtime locks
1442 * @active: true if this lock is preventing suspend
1443 * @name: character string for tracking this lock
1444 */
1445 struct hif_pm_runtime_lock {
1446 struct list_head list;
1447 bool active;
1448 const char *name;
1449 };
1450
1451 #ifdef FEATURE_RUNTIME_PM
1452 /**
1453 * hif_rtpm_register() - Register a module with runtime PM.
1454 * @id: ID of the module which needs to be registered
1455 * @hif_rpm_cbk: callback to be called when get was called in suspended state.
1456 *
1457 * Return: success status if successfully registered
1458 */
1459 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void));
1460
1461 /**
1462 * hif_rtpm_deregister() - Deregister the module
 * @id: ID of the module which needs to be de-registered
 *
 * Return: success status if successfully deregistered
 */
1465 QDF_STATUS hif_rtpm_deregister(uint32_t id);
1466
1467 /**
1468 * hif_rtpm_set_autosuspend_delay() - Set delay to trigger RTPM suspend
1469 * @delay: delay in ms to be set
1470 *
1471 * Return: Success if delay is set successfully
1472 */
1473 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay);
1474
1475 /**
1476 * hif_rtpm_restore_autosuspend_delay() - Restore delay value to default value
1477 *
1478 * Return: Success if reset done. E_ALREADY if delay same as config value
1479 */
1480 QDF_STATUS hif_rtpm_restore_autosuspend_delay(void);
1481
1482 /**
1483 * hif_rtpm_get_autosuspend_delay() -Get delay to trigger RTPM suspend
1484 *
1485 * Return: Delay in ms
1486 */
1487 int hif_rtpm_get_autosuspend_delay(void);
1488
1489 /**
1490 * hif_runtime_lock_init() - API to initialize Runtime PM context
1491 * @lock: QDF lock context
1492 * @name: Context name
1493 *
1494 * This API initializes the Runtime PM context of the caller and
1495 * return the pointer.
1496 *
1497 * Return: None
1498 */
1499 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name);
1500
1501 /**
1502 * hif_runtime_lock_deinit() - This API frees the runtime pm context
1503 * @data: Runtime PM context
1504 *
1505 * Return: void
1506 */
1507 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data);
1508
1509 /**
1510 * hif_rtpm_get() - Increment usage_count on the device to avoid suspend.
 * @type: get call types from enum rpm_type
1512 * @id: ID of the module calling get()
1513 *
1514 * A get operation will prevent a runtime suspend until a
1515 * corresponding put is done. This api should be used when accessing bus.
1516 *
1517 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
1518 * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
1519 *
1520 * return: success if a get has been issued, else error code.
1521 */
1522 QDF_STATUS hif_rtpm_get(uint8_t type, uint32_t id);
1523
1524 /**
1525 * hif_rtpm_put() - do a put operation on the device
 * @type: put call types from enum rpm_type
1527 * @id: ID of the module calling put()
1528 *
1529 * A put operation will allow a runtime suspend after a corresponding
1530 * get was done. This api should be used when finished accessing bus.
1531 *
1532 * This api will return a failure if runtime pm is stopped
1533 * This api will return failure if it would decrement the usage count below 0.
1534 *
1535 * return: QDF_STATUS_SUCCESS if the put is performed
1536 */
1537 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id);
1538
1539 /**
1540 * hif_pm_runtime_prevent_suspend() - Prevent Runtime suspend
1541 * @data: runtime PM lock
1542 *
1543 * This function will prevent runtime suspend, by incrementing
1544 * device's usage count.
1545 *
1546 * Return: status
1547 */
1548 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data);
1549
1550 /**
1551 * hif_pm_runtime_prevent_suspend_sync() - Synchronized prevent Runtime suspend
1552 * @data: runtime PM lock
1553 *
1554 * This function will prevent runtime suspend, by incrementing
1555 * device's usage count.
1556 *
1557 * Return: status
1558 */
1559 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data);
1560
1561 /**
1562 * hif_pm_runtime_allow_suspend() - Allow Runtime suspend
1563 * @data: runtime PM lock
1564 *
1565 * This function will allow runtime suspend, by decrementing
1566 * device's usage count.
1567 *
1568 * Return: status
1569 */
1570 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data);
1571
1572 /**
1573 * hif_rtpm_request_resume() - Request resume if bus is suspended
1574 *
1575 * Return: None
1576 */
1577 void hif_rtpm_request_resume(void);
1578
1579 /**
1580 * hif_rtpm_sync_resume() - Invoke synchronous runtime resume.
1581 *
1582 * This function will invoke synchronous runtime resume.
1583 *
1584 * Return: status
1585 */
1586 QDF_STATUS hif_rtpm_sync_resume(void);
1587
1588 /**
1589 * hif_rtpm_check_and_request_resume() - check if bus is suspended and
1590 * request resume.
1591 * @suspend_in_progress: Request resume if suspend is in progress
1592 *
1593 * Return: void
1594 */
1595 void hif_rtpm_check_and_request_resume(bool suspend_in_progress);
1596
1597 /**
1598 * hif_rtpm_set_client_job() - Set job for the client.
1599 * @client_id: Client id for which job needs to be set
1600 *
1601 * If get failed due to system being in suspended state, set the client job so
1602 * when system resumes the client's job is called.
1603 *
1604 * Return: None
1605 */
1606 void hif_rtpm_set_client_job(uint32_t client_id);
1607
1608 /**
1609 * hif_rtpm_mark_last_busy() - Mark last busy to delay retry to suspend
1610 * @id: ID marking last busy
1611 *
1612 * Return: None
1613 */
1614 void hif_rtpm_mark_last_busy(uint32_t id);
1615
1616 /**
1617 * hif_rtpm_get_monitor_wake_intr() - API to get monitor_wake_intr
1618 *
1619 * monitor_wake_intr variable can be used to indicate if driver expects wake
1620 * MSI for runtime PM
1621 *
1622 * Return: monitor_wake_intr variable
1623 */
1624 int hif_rtpm_get_monitor_wake_intr(void);
1625
1626 /**
1627 * hif_rtpm_set_monitor_wake_intr() - API to set monitor_wake_intr
1628 * @val: value to set
1629 *
1630 * monitor_wake_intr variable can be used to indicate if driver expects wake
1631 * MSI for runtime PM
1632 *
1633 * Return: void
1634 */
1635 void hif_rtpm_set_monitor_wake_intr(int val);
1636
1637 /**
1638 * hif_pre_runtime_suspend() - book keeping before beginning runtime suspend.
1639 * @hif_ctx: HIF context
1640 *
1641 * Makes sure that the pci link will be taken down by the suspend operation.
1642 * If the hif layer is configured to leave the bus on, runtime suspend will
1643 * not save any power.
1644 *
1645 * Set the runtime suspend state to SUSPENDING.
1646 *
1647 * return -EINVAL if the bus won't go down. otherwise return 0
1648 */
1649 int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1650
1651 /**
1652 * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
1653 *
1654 * update the runtime pm state to RESUMING.
1655 * Return: void
1656 */
1657 void hif_pre_runtime_resume(void);
1658
1659 /**
1660 * hif_process_runtime_suspend_success() - bookkeeping of suspend success
1661 *
1662 * Record the success.
1663 * update the runtime_pm state to SUSPENDED
1664 * Return: void
1665 */
1666 void hif_process_runtime_suspend_success(void);
1667
1668 /**
1669 * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
1670 *
1671 * Record the failure.
1672 * mark last busy to delay a retry.
1673 * update the runtime_pm state back to ON
1674 *
1675 * Return: void
1676 */
1677 void hif_process_runtime_suspend_failure(void);
1678
1679 /**
1680 * hif_process_runtime_resume_linkup() - bookkeeping of resuming link up
1681 *
1682 * update the runtime_pm state to RESUMING_LINKUP
1683 * Return: void
1684 */
1685 void hif_process_runtime_resume_linkup(void);
1686
1687 /**
1688 * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
1689 *
1690 * record the success.
1691 * update the runtime_pm state to SUSPENDED
1692 * Return: void
1693 */
1694 void hif_process_runtime_resume_success(void);
1695
1696 /**
1697 * hif_rtpm_print_prevent_list() - list the clients preventing suspend.
1698 *
1699 * Return: None
1700 */
1701 void hif_rtpm_print_prevent_list(void);
1702
1703 /**
1704 * hif_rtpm_suspend_lock() - spin_lock on marking runtime suspend
1705 *
1706 * Return: void
1707 */
1708 void hif_rtpm_suspend_lock(void);
1709
1710 /**
1711 * hif_rtpm_suspend_unlock() - spin_unlock on marking runtime suspend
1712 *
1713 * Return: void
1714 */
1715 void hif_rtpm_suspend_unlock(void);
1716
1717 /**
1718 * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
1719 * @hif_ctx: HIF context
1720 *
1721 * Return: 0 for success and non-zero error code for failure
1722 */
1723 int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx);
1724
1725 /**
1726 * hif_runtime_resume() - do the bus resume part of a runtime resume
1727 * @hif_ctx: HIF context
1728 *
1729 * Return: 0 for success and non-zero error code for failure
1730 */
1731 int hif_runtime_resume(struct hif_opaque_softc *hif_ctx);
1732
1733 /**
1734 * hif_fastpath_resume() - resume fastpath for runtimepm
1735 * @hif_ctx: HIF context
1736 *
1737 * ensure that the fastpath write index register is up to date
1738 * since runtime pm may cause ce_send_fast to skip the register
1739 * write.
1740 *
1741 * fastpath only applicable to legacy copy engine
1742 */
1743 void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx);
1744
1745 /**
 * hif_rtpm_get_state() - get rtpm link state
1747 *
1748 * Return: state
1749 */
1750 int hif_rtpm_get_state(void);
1751
1752 /**
1753 * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1754 * @hif_ctx: HIF context
1755 *
1756 * Return: None
1757 */
1758 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx);
1759
1760 /**
1761 * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1762 * @scn: HIF context
1763 * @ce_id: CE id
1764 *
1765 * Return: None
1766 */
1767 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1768 unsigned long ce_id);
1769
1770 /**
1771 * hif_set_enable_rpm() - Set enable_rpm value
1772 * @hif_hdl: hif opaque handle
1773 *
1774 * Return: None
1775 */
1776 void hif_set_enable_rpm(struct hif_opaque_softc *hif_hdl);
1777
1778 #else
1779
1780 /**
1781 * hif_rtpm_display_last_busy_hist() - Display runtimepm last busy history
1782 * @hif_ctx: HIF context
1783 *
1784 * Return: None
1785 */
1786 static inline
hif_rtpm_display_last_busy_hist(struct hif_opaque_softc * hif_ctx)1787 void hif_rtpm_display_last_busy_hist(struct hif_opaque_softc *hif_ctx) { }
1788
1789 /**
1790 * hif_rtpm_record_ce_last_busy_evt() - Record CE runtimepm last busy event
1791 * @scn: HIF context
1792 * @ce_id: CE id
1793 *
1794 * Return: None
1795 */
1796 static inline
hif_rtpm_record_ce_last_busy_evt(struct hif_softc * scn,unsigned long ce_id)1797 void hif_rtpm_record_ce_last_busy_evt(struct hif_softc *scn,
1798 unsigned long ce_id)
1799 { }
1800
1801 static inline
hif_rtpm_register(uint32_t id,void (* hif_rpm_cbk)(void))1802 QDF_STATUS hif_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
1803 { return QDF_STATUS_SUCCESS; }
1804
1805 static inline
hif_rtpm_deregister(uint32_t id)1806 QDF_STATUS hif_rtpm_deregister(uint32_t id)
1807 { return QDF_STATUS_SUCCESS; }
1808
1809 static inline
hif_rtpm_set_autosuspend_delay(int delay)1810 QDF_STATUS hif_rtpm_set_autosuspend_delay(int delay)
1811 { return QDF_STATUS_SUCCESS; }
1812
hif_rtpm_restore_autosuspend_delay(void)1813 static inline QDF_STATUS hif_rtpm_restore_autosuspend_delay(void)
1814 { return QDF_STATUS_SUCCESS; }
1815
hif_rtpm_get_autosuspend_delay(void)1816 static inline int hif_rtpm_get_autosuspend_delay(void)
1817 { return 0; }
1818
1819 static inline
hif_runtime_lock_init(qdf_runtime_lock_t * lock,const char * name)1820 int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
1821 { return 0; }
1822
1823 static inline
hif_runtime_lock_deinit(struct hif_pm_runtime_lock * data)1824 void hif_runtime_lock_deinit(struct hif_pm_runtime_lock *data)
1825 {}
1826
1827 static inline
hif_rtpm_get(uint8_t type,uint32_t id)1828 int hif_rtpm_get(uint8_t type, uint32_t id)
1829 { return QDF_STATUS_SUCCESS; }
1830
1831 static inline
hif_rtpm_put(uint8_t type,uint32_t id)1832 QDF_STATUS hif_rtpm_put(uint8_t type, uint32_t id)
1833 { return QDF_STATUS_SUCCESS; }
1834
1835 static inline
hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock * data)1836 int hif_pm_runtime_allow_suspend(struct hif_pm_runtime_lock *data)
1837 { return 0; }
1838
1839 static inline
hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock * data)1840 int hif_pm_runtime_prevent_suspend(struct hif_pm_runtime_lock *data)
1841 { return 0; }
1842
1843 static inline
hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock * data)1844 int hif_pm_runtime_prevent_suspend_sync(struct hif_pm_runtime_lock *data)
1845 { return 0; }
1846
1847 static inline
hif_rtpm_sync_resume(void)1848 QDF_STATUS hif_rtpm_sync_resume(void)
1849 { return QDF_STATUS_SUCCESS; }
1850
1851 static inline
hif_rtpm_request_resume(void)1852 void hif_rtpm_request_resume(void)
1853 {}
1854
1855 static inline
hif_rtpm_check_and_request_resume(bool suspend_in_progress)1856 void hif_rtpm_check_and_request_resume(bool suspend_in_progress)
1857 {}
1858
1859 static inline
hif_rtpm_set_client_job(uint32_t client_id)1860 void hif_rtpm_set_client_job(uint32_t client_id)
1861 {}
1862
1863 static inline
hif_rtpm_print_prevent_list(void)1864 void hif_rtpm_print_prevent_list(void)
1865 {}
1866
1867 static inline
hif_rtpm_suspend_unlock(void)1868 void hif_rtpm_suspend_unlock(void)
1869 {}
1870
1871 static inline
hif_rtpm_suspend_lock(void)1872 void hif_rtpm_suspend_lock(void)
1873 {}
1874
1875 static inline
hif_rtpm_get_monitor_wake_intr(void)1876 int hif_rtpm_get_monitor_wake_intr(void)
1877 { return 0; }
1878
1879 static inline
hif_rtpm_set_monitor_wake_intr(int val)1880 void hif_rtpm_set_monitor_wake_intr(int val)
1881 {}
1882
1883 static inline
hif_rtpm_mark_last_busy(uint32_t id)1884 void hif_rtpm_mark_last_busy(uint32_t id)
1885 {}
1886
1887 static inline
hif_set_enable_rpm(struct hif_opaque_softc * hif_hdl)1888 void hif_set_enable_rpm(struct hif_opaque_softc *hif_hdl)
1889 {
1890 }
1891 #endif
1892
1893 void hif_enable_power_management(struct hif_opaque_softc *hif_ctx,
1894 bool is_packet_log_enabled);
1895 void hif_disable_power_management(struct hif_opaque_softc *hif_ctx);
1896
1897 void hif_vote_link_up(struct hif_opaque_softc *hif_ctx);
1898 void hif_vote_link_down(struct hif_opaque_softc *hif_ctx);
1899
1900 bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx);
1901
1902 #ifdef IPA_OFFLOAD
1903 /**
1904 * hif_get_ipa_hw_type() - get IPA hw type
1905 *
1906 * This API return the IPA hw type.
1907 *
1908 * Return: IPA hw type
1909 */
/* Thin wrapper: forwards to the IPA driver's hw-type query */
static inline
enum ipa_hw_type hif_get_ipa_hw_type(void)
{
	return ipa_get_hw_type();
}
1915
1916 /**
1917 * hif_get_ipa_present() - get IPA hw status
1918 *
1919 * This API return the IPA hw status.
1920 *
1921 * Return: true if IPA is present or false otherwise
1922 */
1923 static inline
hif_get_ipa_present(void)1924 bool hif_get_ipa_present(void)
1925 {
1926 if (qdf_ipa_uc_reg_rdyCB(NULL) != -EPERM)
1927 return true;
1928 else
1929 return false;
1930 }
1931 #endif
1932 int hif_bus_resume(struct hif_opaque_softc *hif_ctx);
1933
1934 /**
1935 * hif_bus_early_suspend() - stop non wmi tx traffic
1936 * @hif_ctx: hif context
1937 */
1938 int hif_bus_early_suspend(struct hif_opaque_softc *hif_ctx);
1939
1940 /**
1941 * hif_bus_late_resume() - resume non wmi traffic
1942 * @hif_ctx: hif context
1943 */
1944 int hif_bus_late_resume(struct hif_opaque_softc *hif_ctx);
1945 int hif_bus_suspend(struct hif_opaque_softc *hif_ctx);
1946 int hif_bus_resume_noirq(struct hif_opaque_softc *hif_ctx);
1947 int hif_bus_suspend_noirq(struct hif_opaque_softc *hif_ctx);
1948
1949 /**
1950 * hif_apps_irqs_enable() - Enables all irqs from the APPS side
1951 * @hif_ctx: an opaque HIF handle to use
1952 *
1953 * As opposed to the standard hif_irq_enable, this function always applies to
1954 * the APPS side kernel interrupt handling.
1955 *
1956 * Return: errno
1957 */
1958 int hif_apps_irqs_enable(struct hif_opaque_softc *hif_ctx);
1959
1960 /**
1961 * hif_apps_irqs_disable() - Disables all irqs from the APPS side
1962 * @hif_ctx: an opaque HIF handle to use
1963 *
1964 * As opposed to the standard hif_irq_disable, this function always applies to
1965 * the APPS side kernel interrupt handling.
1966 *
1967 * Return: errno
1968 */
1969 int hif_apps_irqs_disable(struct hif_opaque_softc *hif_ctx);
1970
1971 /**
1972 * hif_apps_wake_irq_enable() - Enables the wake irq from the APPS side
1973 * @hif_ctx: an opaque HIF handle to use
1974 *
1975 * As opposed to the standard hif_irq_enable, this function always applies to
1976 * the APPS side kernel interrupt handling.
1977 *
1978 * Return: errno
1979 */
1980 int hif_apps_wake_irq_enable(struct hif_opaque_softc *hif_ctx);
1981
1982 /**
1983 * hif_apps_wake_irq_disable() - Disables the wake irq from the APPS side
1984 * @hif_ctx: an opaque HIF handle to use
1985 *
1986 * As opposed to the standard hif_irq_disable, this function always applies to
1987 * the APPS side kernel interrupt handling.
1988 *
1989 * Return: errno
1990 */
1991 int hif_apps_wake_irq_disable(struct hif_opaque_softc *hif_ctx);
1992
1993 /**
1994 * hif_apps_enable_irq_wake() - Enables the irq wake from the APPS side
1995 * @hif_ctx: an opaque HIF handle to use
1996 *
1997 * This function always applies to the APPS side kernel interrupt handling
1998 * to wake the system from suspend.
1999 *
2000 * Return: errno
2001 */
2002 int hif_apps_enable_irq_wake(struct hif_opaque_softc *hif_ctx);
2003
2004 /**
2005 * hif_apps_disable_irq_wake() - Disables the wake irq from the APPS side
2006 * @hif_ctx: an opaque HIF handle to use
2007 *
2008 * This function always applies to the APPS side kernel interrupt handling
2009 * to disable the wake irq.
2010 *
2011 * Return: errno
2012 */
2013 int hif_apps_disable_irq_wake(struct hif_opaque_softc *hif_ctx);
2014
2015 /**
2016 * hif_apps_enable_irqs_except_wake_irq() - Enables all irqs except wake_irq
2017 * @hif_ctx: an opaque HIF handle to use
2018 *
2019 * As opposed to the standard hif_irq_enable, this function always applies to
2020 * the APPS side kernel interrupt handling.
2021 *
2022 * Return: errno
2023 */
2024 int hif_apps_enable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
2025
2026 /**
2027 * hif_apps_disable_irqs_except_wake_irq() - Disables all irqs except wake_irq
2028 * @hif_ctx: an opaque HIF handle to use
2029 *
2030 * As opposed to the standard hif_irq_disable, this function always applies to
2031 * the APPS side kernel interrupt handling.
2032 *
2033 * Return: errno
2034 */
2035 int hif_apps_disable_irqs_except_wake_irq(struct hif_opaque_softc *hif_ctx);
2036
2037 int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size);
2038 int hif_dump_registers(struct hif_opaque_softc *scn);
2039 int ol_copy_ramdump(struct hif_opaque_softc *scn);
2040 void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx);
2041 void hif_get_hw_info(struct hif_opaque_softc *hif_ctx, u32 *version,
2042 u32 *revision, const char **target_name);
2043 enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl);
2044 struct hif_target_info *hif_get_target_info_handle(struct hif_opaque_softc *
2045 scn);
2046 struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx);
2047 struct ramdump_info *hif_get_ramdump_ctx(struct hif_opaque_softc *hif_ctx);
2048 enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx);
2049 void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
2050 hif_target_status);
2051 void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
2052 struct hif_config_info *cfg);
2053 void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls);
2054 qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2055 uint32_t transfer_id, u_int32_t len, uint32_t sendhead);
2056 QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
2057 uint32_t transfer_id, u_int32_t len);
2058 int hif_send_fast(struct hif_opaque_softc *osc, qdf_nbuf_t nbuf,
2059 uint32_t transfer_id, uint32_t download_len);
2060 void hif_pkt_dl_len_set(void *hif_sc, unsigned int pkt_download_len);
2061 void hif_ce_war_disable(void);
2062 void hif_ce_war_enable(void);
2063 void hif_disable_interrupt(struct hif_opaque_softc *osc, uint32_t pipe_num);
2064 #ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
2065 struct hif_pipe_addl_info *hif_get_addl_pipe_info(struct hif_opaque_softc *osc,
2066 struct hif_pipe_addl_info *hif_info, uint32_t pipe_number);
2067 uint32_t hif_set_nss_wifiol_mode(struct hif_opaque_softc *osc,
2068 uint32_t pipe_num);
2069 int32_t hif_get_nss_wifiol_bypass_nw_process(struct hif_opaque_softc *osc);
2070 #endif /* QCA_NSS_WIFI_OFFLOAD_SUPPORT */
2071
2072 void hif_set_bundle_mode(struct hif_opaque_softc *hif_ctx, bool enabled,
2073 int rx_bundle_cnt);
2074 int hif_bus_reset_resume(struct hif_opaque_softc *hif_ctx);
2075
2076 void hif_set_attribute(struct hif_opaque_softc *osc, uint8_t hif_attrib);
2077
2078 void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl);
2079
2080 enum hif_exec_type {
2081 HIF_EXEC_NAPI_TYPE,
2082 HIF_EXEC_TASKLET_TYPE,
2083 };
2084
2085 typedef uint32_t (*ext_intr_handler)(void *, uint32_t, int);
2086
2087 /**
2088 * hif_get_int_ctx_irq_num() - retrieve an irq num for an interrupt context id
2089 * @softc: hif opaque context owning the exec context
2090 * @id: the id of the interrupt context
2091 *
2092 * Return: IRQ number of the first (zero'th) IRQ within the interrupt context ID
2093 * 'id' registered with the OS
2094 */
2095 int32_t hif_get_int_ctx_irq_num(struct hif_opaque_softc *softc,
2096 uint8_t id);
2097
2098 /**
2099 * hif_configure_ext_group_interrupts() - Configure ext group interrupts
2100 * @hif_ctx: hif opaque context
2101 *
2102 * Return: QDF_STATUS
2103 */
2104 QDF_STATUS hif_configure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
2105
2106 /**
2107 * hif_deconfigure_ext_group_interrupts() - Deconfigure ext group interrupts
2108 * @hif_ctx: hif opaque context
2109 *
2110 * Return: None
2111 */
2112 void hif_deconfigure_ext_group_interrupts(struct hif_opaque_softc *hif_ctx);
2113
2114 /**
2115 * hif_register_ext_group() - API to register external group
2116 * interrupt handler.
2117 * @hif_ctx : HIF Context
2118 * @numirq: number of irq's in the group
2119 * @irq: array of irq values
2120 * @handler: callback interrupt handler function
2121 * @cb_ctx: context to passed in callback
2122 * @context_name: text name of the context
2123 * @type: napi vs tasklet
2124 * @scale:
2125 *
2126 * Return: QDF_STATUS
2127 */
2128 QDF_STATUS hif_register_ext_group(struct hif_opaque_softc *hif_ctx,
2129 uint32_t numirq, uint32_t irq[],
2130 ext_intr_handler handler,
2131 void *cb_ctx, const char *context_name,
2132 enum hif_exec_type type, uint32_t scale);
2133
2134 void hif_deregister_exec_group(struct hif_opaque_softc *hif_ctx,
2135 const char *context_name);
2136
2137 void hif_update_pipe_callback(struct hif_opaque_softc *osc,
2138 u_int8_t pipeid,
2139 struct hif_msg_callbacks *callbacks);
2140
2141 /**
2142 * hif_print_napi_stats() - Display HIF NAPI stats
2143 * @hif_ctx: HIF opaque context
2144 *
2145 * Return: None
2146 */
2147 void hif_print_napi_stats(struct hif_opaque_softc *hif_ctx);
2148
2149 /**
2150 * hif_clear_napi_stats() - function clears the stats of the
2151 * latency when called.
2152 * @hif_ctx: the HIF context to assign the callback to
2153 *
2154 * Return: None
2155 */
2156 void hif_clear_napi_stats(struct hif_opaque_softc *hif_ctx);
2157
2158 #ifdef __cplusplus
2159 }
2160 #endif
2161
2162 #ifdef FORCE_WAKE
2163 /**
2164 * hif_force_wake_request() - Function to wake from power collapse
2165 * @handle: HIF opaque handle
2166 *
2167 * Description: API to check if the device is awake or not before
2168 * read/write to BAR + 4K registers. If device is awake return
2169 * success otherwise write '1' to
2170 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG which will interrupt
2171 * the device and does wakeup the PCI and MHI within 50ms
2172 * and then the device writes a value to
2173 * PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG to complete the
2174 * handshake process to let the host know the device is awake.
2175 *
2176 * Return: zero - success/non-zero - failure
2177 */
2178 int hif_force_wake_request(struct hif_opaque_softc *handle);
2179
2180 /**
2181 * hif_force_wake_release() - API to release/reset the SOC wake register
2182 * from interrupting the device.
2183 * @handle: HIF opaque handle
2184 *
2185 * Description: API to set the
2186 * PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG to '0'
2187 * to release the interrupt line.
2188 *
2189 * Return: zero - success/non-zero - failure
2190 */
2191 int hif_force_wake_release(struct hif_opaque_softc *handle);
2192 #else
/* Stubs: without FORCE_WAKE the target never power collapses the register
 * space, so force-wake handshakes trivially succeed.
 */
static inline
int hif_force_wake_request(struct hif_opaque_softc *handle)
{
	return 0;
}

static inline
int hif_force_wake_release(struct hif_opaque_softc *handle)
{
	return 0;
}
2204 #endif /* FORCE_WAKE */
2205
2206 #if defined(FEATURE_HAL_DELAYED_REG_WRITE) || \
2207 defined(FEATURE_HIF_DELAYED_REG_WRITE)
2208 /**
2209 * hif_prevent_link_low_power_states() - Prevent from going to low power states
2210 * @hif: HIF opaque context
2211 *
2212 * Return: 0 on success. Error code on failure.
2213 */
2214 int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif);
2215
2216 /**
2217 * hif_allow_link_low_power_states() - Allow link to go to low power states
2218 * @hif: HIF opaque context
2219 *
2220 * Return: None
2221 */
2222 void hif_allow_link_low_power_states(struct hif_opaque_softc *hif);
2223
2224 #else
2225
/* Stubs: without delayed register writes there is no need to hold the link
 * out of low power states, so these always succeed as no-ops.
 */
static inline
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
}
2236 #endif
2237
2238 #ifdef IPA_OPT_WIFI_DP
2239 /**
2240 * hif_prevent_l1() - Prevent from going to low power states
2241 * @hif: HIF opaque context
2242 *
2243 * Return: 0 on success. Error code on failure.
2244 */
2245 int hif_prevent_l1(struct hif_opaque_softc *hif);
2246
2247 /**
2248 * hif_allow_l1() - Allow link to go to low power states
2249 * @hif: HIF opaque context
2250 *
2251 * Return: None
2252 */
2253 void hif_allow_l1(struct hif_opaque_softc *hif);
2254
2255 #else
2256
/* Stubs: L1 prevention is only required for IPA_OPT_WIFI_DP builds. */
static inline
int hif_prevent_l1(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline
void hif_allow_l1(struct hif_opaque_softc *hif)
{
}
2267 #endif
2268
2269 void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle);
2270 void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle);
2271 void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle);
2272
2273 /**
2274 * hif_get_dev_ba_cmem() - get base address of CMEM
2275 * @hif_handle: the HIF context
2276 *
2277 */
2278 void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle);
2279
2280 /**
2281 * hif_get_soc_version() - get soc major version from target info
2282 * @hif_handle: the HIF context
2283 *
2284 * Return: version number
2285 */
2286 uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle);
2287
2288 /**
2289 * hif_set_initial_wakeup_cb() - set the initial wakeup event handler function
2290 * @hif_ctx: the HIF context to assign the callback to
2291 * @callback: the callback to assign
2292 * @priv: the private data to pass to the callback when invoked
2293 *
2294 * Return: None
2295 */
2296 void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
2297 void (*callback)(void *),
2298 void *priv);
2299 /*
2300 * Note: For MCL, #if defined (HIF_CONFIG_SLUB_DEBUG_ON) needs to be checked
2301 * for defined here
2302 */
2303 #if defined(HIF_CONFIG_SLUB_DEBUG_ON) || defined(HIF_CE_DEBUG_DATA_BUF)
2304 ssize_t hif_dump_desc_trace_buf(struct device *dev,
2305 struct device_attribute *attr, char *buf);
2306 ssize_t hif_input_desc_trace_buf_index(struct hif_softc *scn,
2307 const char *buf, size_t size);
2308 ssize_t hif_ce_en_desc_hist(struct hif_softc *scn,
2309 const char *buf, size_t size);
2310 ssize_t hif_disp_ce_enable_desc_data_hist(struct hif_softc *scn, char *buf);
2311 ssize_t hif_dump_desc_event(struct hif_softc *scn, char *buf);
2312 #endif/*#if defined(HIF_CONFIG_SLUB_DEBUG_ON)||defined(HIF_CE_DEBUG_DATA_BUF)*/
2313
2314 /**
2315 * hif_set_ce_service_max_yield_time() - sets CE service max yield time
2316 * @hif: hif context
2317 * @ce_service_max_yield_time: CE service max yield time to set
2318 *
 * This API stores CE service max yield time in hif context based
2320 * on ini value.
2321 *
2322 * Return: void
2323 */
2324 void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
2325 uint32_t ce_service_max_yield_time);
2326
2327 /**
2328 * hif_get_ce_service_max_yield_time() - get CE service max yield time
2329 * @hif: hif context
2330 *
2331 * This API returns CE service max yield time.
2332 *
2333 * Return: CE service max yield time
2334 */
2335 unsigned long long
2336 hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
2337
2338 /**
2339 * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
2340 * @hif: hif context
2341 * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
2342 *
2343 * This API stores CE service max rx ind flush in hif context based
2344 * on ini value.
2345 *
2346 * Return: void
2347 */
2348 void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
2349 uint8_t ce_service_max_rx_ind_flush);
2350
2351 #ifdef OL_ATH_SMART_LOGGING
2352 /**
2353 * hif_log_dump_ce() - Copy all the CE DEST ring to buf
2354 * @scn: HIF handler
2355 * @buf_cur: Current pointer in ring buffer
2356 * @buf_init:Start of the ring buffer
2357 * @buf_sz: Size of the ring buffer
2358 * @ce: Copy Engine id
2359 * @skb_sz: Max size of the SKB buffer to be copied
2360 *
2361 * Calls the respective function to dump all the CE SRC/DEST ring descriptors
2362 * and buffers pointed by them in to the given buf
2363 *
2364 * Return: Current pointer in ring buffer
2365 */
2366 uint8_t *hif_log_dump_ce(struct hif_softc *scn, uint8_t *buf_cur,
2367 uint8_t *buf_init, uint32_t buf_sz,
2368 uint32_t ce, uint32_t skb_sz);
2369 #endif /* OL_ATH_SMART_LOGGING */
2370
2371 /**
2372 * hif_softc_to_hif_opaque_softc() - API to convert hif_softc handle
2373 * to hif_opaque_softc handle
2374 * @hif_handle: hif_softc type
2375 *
2376 * Return: hif_opaque_softc type
2377 */
static inline struct hif_opaque_softc *
hif_softc_to_hif_opaque_softc(struct hif_softc *hif_handle)
{
	/* Pure pointer-type conversion; no dereference, no side effects */
	return (struct hif_opaque_softc *)hif_handle;
}
2383
2384 /**
2385 * hif_try_complete_dp_tasks() - Try to complete all DP related tasks
2386 * @hif_ctx: opaque softc handle
2387 *
2388 * Return: QDF_STATUS of operation
2389 */
2390 QDF_STATUS hif_try_complete_dp_tasks(struct hif_opaque_softc *hif_ctx);
2391
2392 #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
2393 QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2394 void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx);
2395 void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx);
2396 void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2397 uint8_t type, uint8_t access);
2398 uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
2399 uint8_t type);
2400 #else
/* Stubs: EP vote-access gating only exists for HIF_IPCI with
 * FEATURE_HAL_DELAYED_REG_WRITE; otherwise access is always granted.
 */
static inline QDF_STATUS
hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	return QDF_STATUS_SUCCESS;
}

static inline void
hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
}

static inline void
hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type, uint8_t access)
{
}

static inline uint8_t
hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
		       uint8_t type)
{
	/* Access is unconditionally enabled when the feature is absent */
	return HIF_EP_VOTE_ACCESS_ENABLE;
}
2429 #endif
2430
2431 #ifdef FORCE_WAKE
2432 /**
2433 * hif_srng_init_phase(): Indicate srng initialization phase
2434 * to avoid force wake as UMAC power collapse is not yet
2435 * enabled
2436 * @hif_ctx: hif opaque handle
2437 * @init_phase: initialization phase
2438 *
2439 * Return: None
2440 */
2441 void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
2442 bool init_phase);
2443 #else
/* Stub: srng init-phase tracking is only needed when FORCE_WAKE is set. */
static inline
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
}
2449 #endif /* FORCE_WAKE */
2450
2451 #ifdef HIF_IPCI
2452 /**
2453 * hif_shutdown_notifier_cb - Call back for shutdown notifier
2454 * @ctx: hif handle
2455 *
2456 * Return: None
2457 */
2458 void hif_shutdown_notifier_cb(void *ctx);
2459 #else
/* Stub: shutdown notifier callback only applies to HIF_IPCI builds. */
static inline
void hif_shutdown_notifier_cb(void *ctx)
{
}
2464 #endif /* HIF_IPCI */
2465
2466 #ifdef HIF_CE_LOG_INFO
2467 /**
2468 * hif_log_ce_info() - API to log ce info
2469 * @scn: hif handle
2470 * @data: hang event data buffer
2471 * @offset: offset at which data needs to be written
2472 *
2473 * Return: None
2474 */
2475 void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
2476 unsigned int *offset);
2477 #else
/* Stub: CE info logging into hang-event data requires HIF_CE_LOG_INFO. */
static inline
void hif_log_ce_info(struct hif_softc *scn, uint8_t *data,
		     unsigned int *offset)
{
}
2483 #endif
2484
2485 #if defined(HIF_CPU_PERF_AFFINE_MASK) || \
2486 defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
2487 /**
2488 * hif_config_irq_set_perf_affinity_hint() - API to set affinity
2489 * @hif_ctx: hif opaque handle
2490 *
2491 * This function is used to move the WLAN IRQs to perf cores in
2492 * case of defconfig builds.
2493 *
2494 * Return: None
2495 */
2496 void hif_config_irq_set_perf_affinity_hint(
2497 struct hif_opaque_softc *hif_ctx);
2498
2499 #else
/* Stub: perf-cluster IRQ affinity is a no-op without the affinity features. */
static inline void hif_config_irq_set_perf_affinity_hint(
			struct hif_opaque_softc *hif_ctx)
{
}
2504 #endif
2505
2506 /**
2507 * hif_apps_grp_irqs_enable() - enable ext grp irqs
2508 * @hif_ctx: HIF opaque context
2509 *
2510 * Return: 0 on success. Error code on failure.
2511 */
2512 int hif_apps_grp_irqs_enable(struct hif_opaque_softc *hif_ctx);
2513
2514 /**
2515 * hif_apps_grp_irqs_disable() - disable ext grp irqs
2516 * @hif_ctx: HIF opaque context
2517 *
2518 * Return: 0 on success. Error code on failure.
2519 */
2520 int hif_apps_grp_irqs_disable(struct hif_opaque_softc *hif_ctx);
2521
2522 /**
2523 * hif_disable_grp_irqs() - disable ext grp irqs
2524 * @scn: HIF opaque context
2525 *
2526 * Return: 0 on success. Error code on failure.
2527 */
2528 int hif_disable_grp_irqs(struct hif_opaque_softc *scn);
2529
2530 /**
2531 * hif_enable_grp_irqs() - enable ext grp irqs
2532 * @scn: HIF opaque context
2533 *
2534 * Return: 0 on success. Error code on failure.
2535 */
2536 int hif_enable_grp_irqs(struct hif_opaque_softc *scn);
2537
2538 enum hif_credit_exchange_type {
2539 HIF_REQUEST_CREDIT,
2540 HIF_PROCESS_CREDIT_REPORT,
2541 };
2542
2543 enum hif_detect_latency_type {
2544 HIF_DETECT_TASKLET,
2545 HIF_DETECT_CREDIT,
2546 HIF_DETECT_UNKNOWN
2547 };
2548
2549 #ifdef HIF_DETECTION_LATENCY_ENABLE
2550 void hif_latency_detect_credit_record_time(
2551 enum hif_credit_exchange_type type,
2552 struct hif_opaque_softc *hif_ctx);
2553
2554 void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx);
2555 void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx);
2556 void hif_check_detection_latency(struct hif_softc *scn,
2557 bool from_timer,
2558 uint32_t bitmap_type);
2559 void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value);
2560
2561 /**
2562 * hif_tasklet_latency_record_exec() - record execute time and
2563 * check the latency
2564 * @scn: HIF opaque context
2565 * @idx: CE id
2566 *
2567 * Return: None
2568 */
2569 void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx);
2570
2571 /**
2572 * hif_tasklet_latency_record_sched() - record schedule time of a tasklet
2573 * @scn: HIF opaque context
2574 * @idx: CE id
2575 *
2576 * Return: None
2577 */
2578 void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx);
2579 #else
/* Stubs: latency detection bookkeeping compiles away when
 * HIF_DETECTION_LATENCY_ENABLE is not defined.
 */
static inline
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{}

static inline
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{}
static inline
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{}

static inline
void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{}

static inline
void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
{}

static inline
void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
{}
2610 #endif
2611
2612 #ifdef SYSTEM_PM_CHECK
2613 /**
2614 * __hif_system_pm_set_state() - Set system pm state
2615 * @hif: hif opaque handle
2616 * @state: system state
2617 *
2618 * Return: None
2619 */
2620 void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
2621 enum hif_system_pm_state state);
2622
2623 /**
2624 * hif_system_pm_set_state_on() - Set system pm state to ON
2625 * @hif: hif opaque handle
2626 *
2627 * Return: None
2628 */
static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
	/* Thin wrapper over __hif_system_pm_set_state() */
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_ON);
}

/**
 * hif_system_pm_set_state_resuming() - Set system pm state to resuming
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_RESUMING);
}

/**
 * hif_system_pm_set_state_suspending() - Set system pm state to suspending
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDING);
}

/**
 * hif_system_pm_set_state_suspended() - Set system pm state to suspended
 * @hif: hif opaque handle
 *
 * Return: None
 */
static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
	__hif_system_pm_set_state(hif, HIF_SYSTEM_PM_STATE_BUS_SUSPENDED);
}
2670
2671 /**
2672 * hif_system_pm_get_state() - Get system pm state
2673 * @hif: hif opaque handle
2674 *
2675 * Return: system state
2676 */
2677 int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif);
2678
2679 /**
2680 * hif_system_pm_state_check() - Check system state and trigger resume
2681 * if required
2682 * @hif: hif opaque handle
2683 *
2684 * Return: 0 if system is in on state else error code
2685 */
2686 int hif_system_pm_state_check(struct hif_opaque_softc *hif);
2687 #else
/* Stubs: without SYSTEM_PM_CHECK the pm state machine is absent; getters
 * report state 0 and the state check always reports "on" (0).
 */
static inline
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
}

static inline
void hif_system_pm_set_state_on(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_resuming(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspending(struct hif_opaque_softc *hif)
{
}

static inline
void hif_system_pm_set_state_suspended(struct hif_opaque_softc *hif)
{
}

static inline
int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	return 0;
}

static inline int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	return 0;
}
2724 #endif
2725
2726 #ifdef FEATURE_IRQ_AFFINITY
2727 /**
2728 * hif_set_grp_intr_affinity() - API to set affinity for grp
2729 * intrs set in the bitmap
2730 * @scn: hif handle
2731 * @grp_intr_bitmask: grp intrs for which perf affinity should be
2732 * applied
2733 * @perf: affine to perf or non-perf cluster
2734 *
2735 * Return: None
2736 */
2737 void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
2738 uint32_t grp_intr_bitmask, bool perf);
2739 #else
/* Stub: group-interrupt affinity control requires FEATURE_IRQ_AFFINITY. */
static inline
void hif_set_grp_intr_affinity(struct hif_opaque_softc *scn,
			       uint32_t grp_intr_bitmask, bool perf)
{
}
2745 #endif
2746 /**
2747 * hif_get_max_wmi_ep() - Get max WMI EPs configured in target svc map
2748 * @scn: hif opaque handle
2749 *
2750 * Description:
2751 * Gets number of WMI EPs configured in target svc map. Since EP map
2752 * include IN and OUT direction pipes, count only OUT pipes to get EPs
2753 * configured for WMI service.
2754 *
2755 * Return:
2756 * uint8_t: count for WMI eps in target svc map
2757 */
2758 uint8_t hif_get_max_wmi_ep(struct hif_opaque_softc *scn);
2759
2760 #ifdef DP_UMAC_HW_RESET_SUPPORT
2761 /**
2762 * hif_register_umac_reset_handler() - Register UMAC HW reset handler
2763 * @hif_scn: hif opaque handle
2764 * @irq_handler: irq callback handler function
2765 * @tl_handler: tasklet callback handler function
2766 * @cb_ctx: context to passed to @handler
2767 * @irq: irq number to be used for UMAC HW reset interrupt
2768 *
2769 * Return: QDF_STATUS of operation
2770 */
2771 QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
2772 bool (*irq_handler)(void *cb_ctx),
2773 int (*tl_handler)(void *cb_ctx),
2774 void *cb_ctx, int irq);
2775
2776 /**
2777 * hif_unregister_umac_reset_handler() - Unregister UMAC HW reset handler
2778 * @hif_scn: hif opaque handle
2779 *
2780 * Return: QDF_STATUS of operation
2781 */
2782 QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn);
2783 QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
2784 int *umac_reset_irq);
2785 #else
/* Stubs: UMAC HW reset handling is only built with DP_UMAC_HW_RESET_SUPPORT;
 * register/unregister/irq queries trivially succeed otherwise.
 */
static inline
QDF_STATUS hif_register_umac_reset_handler(struct hif_opaque_softc *hif_scn,
					   bool (*irq_handler)(void *cb_ctx),
					   int (*tl_handler)(void *cb_ctx),
					   void *cb_ctx, int irq)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_umac_reset_handler(struct hif_opaque_softc *hif_scn)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_get_umac_reset_irq(struct hif_opaque_softc *hif_scn,
				  int *umac_reset_irq)
{
	return QDF_STATUS_SUCCESS;
}

2807
2808 #endif /* DP_UMAC_HW_RESET_SUPPORT */
2809
2810 #ifdef FEATURE_DIRECT_LINK
2811 /**
2812 * hif_set_irq_config_by_ceid() - Set irq configuration for CE given by id
2813 * @scn: hif opaque handle
2814 * @ce_id: CE id
2815 * @addr: irq trigger address
2816 * @data: irq trigger data
2817 *
2818 * Return: QDF status
2819 */
2820 QDF_STATUS
2821 hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
2822 uint64_t addr, uint32_t data);
2823
2824 /**
2825 * hif_get_direct_link_ce_dest_srng_buffers() - Get Direct Link ce dest srng
2826 * buffer information
2827 * @scn: hif opaque handle
2828 * @dma_addr: pointer to array of dma addresses
2829 * @buf_size: ce dest ring buffer size
2830 *
2831 * Return: Number of buffers attached to the dest srng.
2832 */
2833 uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
2834 uint64_t **dma_addr,
2835 uint32_t *buf_size);
2836
2837 /**
2838 * hif_get_direct_link_ce_srng_info() - Get Direct Link CE srng information
2839 * @scn: hif opaque handle
2840 * @info: Direct Link CEs information
2841 * @max_ce_info_len: max array size of ce info
2842 *
2843 * Return: QDF status
2844 */
2845 QDF_STATUS
2846 hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
2847 struct hif_direct_link_ce_info *info,
2848 uint8_t max_ce_info_len);
2849 #else
/* Stubs: Direct Link CE configuration exists only with FEATURE_DIRECT_LINK;
 * queries succeed and report zero buffers otherwise.
 */
static inline QDF_STATUS
hif_set_irq_config_by_ceid(struct hif_opaque_softc *scn, uint8_t ce_id,
			   uint64_t addr, uint32_t data)
{
	return QDF_STATUS_SUCCESS;
}

static inline
uint16_t hif_get_direct_link_ce_dest_srng_buffers(struct hif_opaque_softc *scn,
						  uint64_t **dma_addr,
						  uint32_t *buf_size)
{
	return 0;
}

static inline QDF_STATUS
hif_get_direct_link_ce_srng_info(struct hif_opaque_softc *scn,
				 struct hif_direct_link_ce_info *info,
				 uint8_t max_ce_info_len)
{
	return QDF_STATUS_SUCCESS;
}
2872 #endif
2873
2874 static inline QDF_STATUS
hif_irq_set_affinity_hint(int irq_num,qdf_cpu_mask * cpu_mask)2875 hif_irq_set_affinity_hint(int irq_num, qdf_cpu_mask *cpu_mask)
2876 {
2877 QDF_STATUS status;
2878
2879 qdf_dev_modify_irq_status(irq_num, IRQ_NO_BALANCING, 0);
2880 status = qdf_dev_set_irq_affinity(irq_num,
2881 (struct qdf_cpu_mask *)cpu_mask);
2882 qdf_dev_modify_irq_status(irq_num, 0, IRQ_NO_BALANCING);
2883
2884 return status;
2885 }
2886
2887 #ifdef WLAN_FEATURE_AFFINITY_MGR
2888 /**
2889 * hif_affinity_mgr_init_ce_irq() - Init for CE IRQ
2890 * @scn: hif opaque handle
2891 * @id: CE ID
2892 * @irq: IRQ assigned
2893 *
2894 * Return: None
2895 */
2896 void
2897 hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq);
2898
2899 /**
2900 * hif_affinity_mgr_init_grp_irq() - Init for group IRQ
2901 * @scn: hif opaque handle
2902 * @grp_id: GRP ID
2903 * @irq_num: IRQ number of hif ext group
2904 * @irq: IRQ number assigned
2905 *
2906 * Return: None
2907 */
2908 void
2909 hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
2910 int irq_num, int irq);
2911
2912 /**
2913 * hif_affinity_mgr_set_qrg_irq_affinity() - Set affinity for group IRQ
2914 * @scn: hif opaque handle
2915 * @irq: IRQ assigned
2916 * @grp_id: GRP ID
2917 * @irq_index: IRQ number of hif ext group
 2918 * @cpu_mask: requested cpu_mask for IRQ
2919 *
2920 * Return: status
2921 */
2922 QDF_STATUS
2923 hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
2924 uint32_t grp_id, uint32_t irq_index,
2925 qdf_cpu_mask *cpu_mask);
2926
2927 /**
2928 * hif_affinity_mgr_set_ce_irq_affinity() - Set affinity for CE IRQ
2929 * @scn: hif opaque handle
2930 * @irq: IRQ assigned
2931 * @ce_id: CE ID
 2932 * @cpu_mask: requested cpu_mask for IRQ
2933 *
2934 * Return: status
2935 */
2936 QDF_STATUS
2937 hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
2938 uint32_t ce_id, qdf_cpu_mask *cpu_mask);
2939
2940 /**
2941 * hif_affinity_mgr_affine_irq() - Affine CE and GRP IRQs
2942 * @scn: hif opaque handle
2943 *
2944 * Return: None
2945 */
2946 void hif_affinity_mgr_affine_irq(struct hif_softc *scn);
2947 #else
/* No-op stub when WLAN_FEATURE_AFFINITY_MGR is not compiled in */
static inline void
hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
{
}
2952
/* No-op stub when WLAN_FEATURE_AFFINITY_MGR is not compiled in */
static inline void
hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id, int irq_num,
			      int irq)
{
}
2958
/* Without the affinity manager, fall through to a plain affinity-hint
 * update for the group IRQ (grp_id/irq_index unused in this path).
 */
static inline QDF_STATUS
hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
				      uint32_t grp_id, uint32_t irq_index,
				      qdf_cpu_mask *cpu_mask)
{
	return hif_irq_set_affinity_hint(irq, cpu_mask);
}
2966
/* Without the affinity manager, fall through to a plain affinity-hint
 * update for the CE IRQ (ce_id unused in this path).
 */
static inline QDF_STATUS
hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
{
	return hif_irq_set_affinity_hint(irq, cpu_mask);
}
2973
/* No-op stub when WLAN_FEATURE_AFFINITY_MGR is not compiled in */
static inline
void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
{
}
2978 #endif
2979
2980 /**
2981 * hif_affinity_mgr_set_affinity() - Affine CE and GRP IRQs
2982 * @scn: hif opaque handle
2983 *
2984 * Return: None
2985 */
2986 void hif_affinity_mgr_set_affinity(struct hif_opaque_softc *scn);
2987
2988 #ifdef FEATURE_HIF_DELAYED_REG_WRITE
2989 /**
2990 * hif_print_reg_write_stats() - Print hif delayed reg write stats
2991 * @hif_ctx: hif opaque handle
2992 *
2993 * Return: None
2994 */
2995 void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx);
2996 #else
hif_print_reg_write_stats(struct hif_opaque_softc * hif_ctx)2997 static inline void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
2998 {
2999 }
3000 #endif
3001 void hif_ce_print_ring_stats(struct hif_opaque_softc *hif_ctx);
3002 #endif /* _HIF_H_ */
3003