/*
 * Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __PLD_PCIE_H__
#define __PLD_PCIE_H__

#ifdef CONFIG_PLD_PCIE_CNSS
#ifdef CONFIG_CNSS_OUT_OF_TREE
#include "cnss2.h"
#else
#include <net/cnss2.h>
#endif
#endif
#include <linux/pci.h>
#include "pld_internal.h"

#ifdef DYNAMIC_SINGLE_CHIP
#define PREFIX DYNAMIC_SINGLE_CHIP "/"
#else

#ifdef MULTI_IF_NAME
#define PREFIX MULTI_IF_NAME "/"
#else
#define PREFIX ""
#endif

#endif

#if !defined(HIF_PCI) || defined(CONFIG_PLD_PCIE_FW_SIM)
static inline int pld_pcie_register_driver(void)
{
	return 0;
}

static inline void pld_pcie_unregister_driver(void)
{
}

static inline int pld_pcie_get_ce_id(struct device *dev, int irq)
{
	return 0;
}
#else
/**
 * pld_pcie_register_driver() - Register PCIE device callback functions
 *
 * Return: 0 on success, negative error code on failure
 */
int pld_pcie_register_driver(void);

/**
 * pld_pcie_unregister_driver() - Unregister PCIE device callback functions
 *
 * Return: void
 */
void pld_pcie_unregister_driver(void);

/**
 * pld_pcie_get_ce_id() - Get CE number for the provided IRQ
 * @dev: device
 * @irq: IRQ number
 *
 * Return: CE number
 */
int pld_pcie_get_ce_id(struct device *dev, int irq);
#endif
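
/*
 * Example (illustrative sketch only, not part of the PLD API): a typical
 * module init/exit pair that registers and unregisters the PCIe callbacks.
 * The wlan_module_init()/wlan_module_exit() names are hypothetical callers.
 *
 *	static int __init wlan_module_init(void)
 *	{
 *		return pld_pcie_register_driver();
 *	}
 *
 *	static void __exit wlan_module_exit(void)
 *	{
 *		pld_pcie_unregister_driver();
 *	}
 */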

#ifndef CONFIG_PLD_PCIE_CNSS
static inline int pld_pcie_wlan_enable(struct device *dev,
				       struct pld_wlan_enable_cfg *config,
				       enum pld_driver_mode mode,
				       const char *host_version)
{
	return 0;
}

static inline int pld_pcie_wlan_disable(struct device *dev,
					enum pld_driver_mode mode)
{
	return 0;
}

static inline int pld_pcie_wlan_hw_enable(void)
{
	return 0;
}

#else

/**
 * pld_pcie_wlan_enable() - Enable WLAN
 * @dev: device
 * @config: WLAN configuration data
 * @mode: WLAN mode
 * @host_version: host software version
 *
 * This function enables WLAN FW. It passes WLAN configuration data,
 * WLAN mode and host software version to FW.
 *
 * Return: 0 for success
 *         Non zero failure code for errors
 */
int pld_pcie_wlan_enable(struct device *dev, struct pld_wlan_enable_cfg *config,
			 enum pld_driver_mode mode, const char *host_version);

/**
 * pld_pcie_wlan_disable() - Disable WLAN
 * @dev: device
 * @mode: WLAN mode
 *
 * This function disables WLAN FW. It passes WLAN mode to FW.
 *
 * Return: 0 for success
 *         Non zero failure code for errors
 */
int pld_pcie_wlan_disable(struct device *dev, enum pld_driver_mode mode);
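
/*
 * Example (illustrative sketch, not a mandated sequence): enable WLAN FW on
 * driver start and disable it on stop. The cfg contents, mode value and
 * host_version string are assumed to be provided by the caller.
 *
 *	On start:
 *		ret = pld_pcie_wlan_enable(dev, &cfg, mode, host_version);
 *		if (ret)
 *			return ret;
 *
 *	On stop:
 *		pld_pcie_wlan_disable(dev, mode);
 */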

#ifdef FEATURE_CNSS_HW_SECURE_DISABLE
static inline int pld_pcie_wlan_hw_enable(void)
{
	return cnss_wlan_hw_enable();
}
#else
static inline int pld_pcie_wlan_hw_enable(void)
{
	return -EINVAL;
}
#endif
#endif

#if defined(CONFIG_PLD_PCIE_CNSS)
static inline int pld_pcie_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
{
	return cnss_set_fw_log_mode(dev, fw_log_mode);
}

static inline void pld_pcie_intr_notify_q6(struct device *dev)
{
}
#else
static inline int pld_pcie_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
{
	return 0;
}

static inline void pld_pcie_intr_notify_q6(struct device *dev)
{
}
#endif

#if (!defined(CONFIG_PLD_PCIE_CNSS)) || (!defined(CONFIG_CNSS_SECURE_FW))
static inline int pld_pcie_get_sha_hash(struct device *dev, const u8 *data,
					u32 data_len, u8 *hash_idx, u8 *out)
{
	return 0;
}

static inline void *pld_pcie_get_fw_ptr(struct device *dev)
{
	return NULL;
}
#else
static inline int pld_pcie_get_sha_hash(struct device *dev, const u8 *data,
					u32 data_len, u8 *hash_idx, u8 *out)
{
	return cnss_get_sha_hash(data, data_len, hash_idx, out);
}

static inline void *pld_pcie_get_fw_ptr(struct device *dev)
{
	return cnss_get_fw_ptr();
}
#endif

#ifdef CONFIG_PLD_PCIE_CNSS
static inline int pld_pcie_wlan_pm_control(struct device *dev, bool vote)
{
	return cnss_wlan_pm_control(dev, vote);
}
#else
static inline int pld_pcie_wlan_pm_control(struct device *dev, bool vote)
{
	return 0;
}
#endif

#ifndef CONFIG_PLD_PCIE_CNSS
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
static inline void *pld_pcie_smmu_get_domain(struct device *dev)
{
	return NULL;
}
#else
static inline void *pld_pcie_smmu_get_mapping(struct device *dev)
{
	return NULL;
}
#endif

static inline int
pld_pcie_smmu_map(struct device *dev,
		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
{
	return 0;
}

static inline int
pld_pcie_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
{
	return 0;
}

static inline int
pld_pcie_get_fw_files_for_target(struct device *dev,
				 struct pld_fw_files *pfw_files,
				 u32 target_type, u32 target_version)
{
	pld_get_default_fw_files(pfw_files);
	return 0;
}

static inline int pld_pcie_prevent_l1(struct device *dev)
{
	return 0;
}

static inline void pld_pcie_allow_l1(struct device *dev)
{
}

static inline int pld_pcie_set_gen_speed(struct device *dev, u8 pcie_gen_speed)
{
	return 0;
}

static inline void pld_pcie_link_down(struct device *dev)
{
}

static inline int pld_pcie_get_reg_dump(struct device *dev, uint8_t *buf,
					uint32_t len)
{
	return 0;
}

static inline int pld_pcie_is_fw_down(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_athdiag_read(struct device *dev, uint32_t offset,
					uint32_t memtype, uint32_t datalen,
					uint8_t *output)
{
	return 0;
}

static inline int pld_pcie_athdiag_write(struct device *dev, uint32_t offset,
					 uint32_t memtype, uint32_t datalen,
					 uint8_t *input)
{
	return 0;
}

static inline void
pld_pcie_schedule_recovery_work(struct device *dev,
				enum pld_recovery_reason reason)
{
}

static inline void *pld_pcie_get_virt_ramdump_mem(struct device *dev,
						  unsigned long *size)
{
	size_t length = 0;
	gfp_t flags = GFP_KERNEL;

	length = TOTAL_DUMP_SIZE;

	if (!size)
		return NULL;

	*size = (unsigned long)length;

	/* Cannot sleep in IRQ/atomic context, fall back to GFP_ATOMIC */
	if (in_interrupt() || irqs_disabled() || in_atomic())
		flags = GFP_ATOMIC;

	return kzalloc(length, flags);
}

static inline void pld_pcie_release_virt_ramdump_mem(void *address)
{
	kfree(address);
}

static inline void pld_pcie_device_crashed(struct device *dev)
{
}

static inline void pld_pcie_device_self_recovery(struct device *dev,
						 enum pld_recovery_reason reason)
{
}

static inline void pld_pcie_request_pm_qos(struct device *dev, u32 qos_val)
{
}

static inline void pld_pcie_remove_pm_qos(struct device *dev)
{
}

static inline void pld_pcie_set_tsf_sync_period(struct device *dev, u32 val)
{
}

static inline void pld_pcie_reset_tsf_sync_period(struct device *dev)
{
}

static inline int pld_pcie_request_bus_bandwidth(struct device *dev,
						 int bandwidth)
{
	return 0;
}

static inline int pld_pcie_get_platform_cap(struct device *dev,
					    struct pld_platform_cap *cap)
{
	return 0;
}

static inline int pld_pcie_get_soc_info(struct device *dev,
					struct pld_soc_info *info)
{
	return 0;
}

static inline int pld_pcie_auto_suspend(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_auto_resume(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_force_wake_request(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_force_wake_request_sync(struct device *dev,
						   int timeout_us)
{
	return 0;
}

static inline int pld_pcie_is_device_awake(struct device *dev)
{
	return true;
}

static inline int pld_pcie_force_wake_release(struct device *dev)
{
	return 0;
}

static inline void pld_pcie_lock_reg_window(struct device *dev,
					    unsigned long *flags)
{
}

static inline void pld_pcie_unlock_reg_window(struct device *dev,
					      unsigned long *flags)
{
}

static inline int pld_pcie_get_pci_slot(struct device *dev)
{
	return 0;
}

static inline struct kobject *pld_pcie_get_wifi_kobj(struct device *dev)
{
	return NULL;
}

static inline int pld_pcie_power_on(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_power_off(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_idle_restart(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_idle_shutdown(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_force_assert_target(struct device *dev)
{
	return -EINVAL;
}

static inline int pld_pcie_collect_rddm(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_qmi_send_get(struct device *dev)
{
	return 0;
}

static inline int pld_pcie_qmi_send_put(struct device *dev)
{
	return 0;
}

static inline int
pld_pcie_qmi_send(struct device *dev, int type, void *cmd,
		  int cmd_len, void *cb_ctx,
		  int (*cb)(void *ctx, void *event, int event_len))
{
	return -EINVAL;
}

static inline int
pld_pcie_register_qmi_ind(struct device *dev, void *cb_ctx,
			  int (*cb)(void *ctx, uint16_t type,
				    void *event, int event_len))
{
	return -EINVAL;
}

static inline int pld_pcie_get_user_msi_assignment(struct device *dev,
						    char *user_name,
						    int *num_vectors,
						    uint32_t *user_base_data,
						    uint32_t *base_vector)
{
	return -EINVAL;
}

static inline int pld_pcie_get_msi_irq(struct device *dev, unsigned int vector)
{
	return 0;
}

static inline bool pld_pcie_is_one_msi(struct device *dev)
{
	return false;
}

static inline void pld_pcie_get_msi_address(struct device *dev,
					    uint32_t *msi_addr_low,
					    uint32_t *msi_addr_high)
{
}

static inline int pld_pcie_is_drv_connected(struct device *dev)
{
	return 0;
}

static inline bool pld_pcie_platform_driver_support(void)
{
	return false;
}

static inline bool pld_pcie_is_direct_link_supported(struct device *dev)
{
	return false;
}

static inline
int pld_pcie_audio_smmu_map(struct device *dev, phys_addr_t paddr,
			    dma_addr_t iova, size_t size)
{
	return 0;
}

static inline
void pld_pcie_audio_smmu_unmap(struct device *dev, dma_addr_t iova, size_t size)
{
}

static inline int pld_pcie_set_wfc_mode(struct device *dev,
					enum pld_wfc_mode wfc_mode)
{
	return 0;
}

static inline int pld_pci_thermal_register(struct device *dev,
					   unsigned long max_state,
					   int mon_id)
{
	return 0;
}

static inline void pld_pci_thermal_unregister(struct device *dev,
					      int mon_id)
{
}

static inline int pld_pci_get_thermal_state(struct device *dev,
					    unsigned long *thermal_state,
					    int mon_id)
{
	return 0;
}

#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
int pld_pcie_set_wfc_mode(struct device *dev,
			  enum pld_wfc_mode wfc_mode);
#else
static inline int pld_pcie_set_wfc_mode(struct device *dev,
					enum pld_wfc_mode wfc_mode)
{
	return 0;
}
#endif

/**
 * pld_pcie_get_fw_files_for_target() - Get FW file names
 * @dev: device
 * @pfw_files: buffer for FW file names
 * @target_type: target type
 * @target_version: target version
 *
 * Return target-specific FW file names to the buffer.
 *
 * Return: 0 for success
 *         Non zero failure code for errors
 */
int pld_pcie_get_fw_files_for_target(struct device *dev,
				     struct pld_fw_files *pfw_files,
				     u32 target_type, u32 target_version);
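
/*
 * Example (illustrative sketch): fetching FW file names for a probed target.
 * target_type and target_version are assumed to come from the caller's
 * target identification path.
 *
 *	struct pld_fw_files fw_files;
 *	int ret;
 *
 *	ret = pld_pcie_get_fw_files_for_target(dev, &fw_files,
 *					       target_type, target_version);
 *	if (ret)
 *		return ret;
 */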

/**
 * pld_pcie_get_platform_cap() - Get platform capabilities
 * @dev: device
 * @cap: buffer for the capabilities
 *
 * Return capabilities to the buffer.
 *
 * Return: 0 for success
 *         Non zero failure code for errors
 */
int pld_pcie_get_platform_cap(struct device *dev, struct pld_platform_cap *cap);

/**
 * pld_pcie_get_soc_info() - Get SOC information
 * @dev: device
 * @info: buffer for SOC information
 *
 * Return SOC info to the buffer.
 *
 * Return: 0 for success
 *         Non zero failure code for errors
 */
int pld_pcie_get_soc_info(struct device *dev, struct pld_soc_info *info);
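
/*
 * Example (illustrative sketch): querying platform capabilities and SOC
 * information into caller-owned buffers before configuring the target.
 *
 *	struct pld_platform_cap cap;
 *	struct pld_soc_info info;
 *	int ret;
 *
 *	ret = pld_pcie_get_platform_cap(dev, &cap);
 *	if (ret)
 *		return ret;
 *
 *	ret = pld_pcie_get_soc_info(dev, &info);
 *	if (ret)
 *		return ret;
 */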

/**
 * pld_pcie_schedule_recovery_work() - schedule recovery work
 * @dev: device
 * @reason: recovery reason
 *
 * Return: void
 */
void pld_pcie_schedule_recovery_work(struct device *dev,
				     enum pld_recovery_reason reason);

/**
 * pld_pcie_device_self_recovery() - device self recovery
 * @dev: device
 * @reason: recovery reason
 *
 * Return: void
 */
void pld_pcie_device_self_recovery(struct device *dev,
				   enum pld_recovery_reason reason);

static inline int pld_pcie_collect_rddm(struct device *dev)
{
	return cnss_force_collect_rddm(dev);
}

static inline int pld_pcie_qmi_send_get(struct device *dev)
{
	return cnss_qmi_send_get(dev);
}

static inline int pld_pcie_qmi_send_put(struct device *dev)
{
	return cnss_qmi_send_put(dev);
}

static inline int
pld_pcie_qmi_send(struct device *dev, int type, void *cmd,
		  int cmd_len, void *cb_ctx,
		  int (*cb)(void *ctx, void *event, int event_len))
{
	return cnss_qmi_send(dev, type, cmd, cmd_len, cb_ctx, cb);
}

#if defined(WLAN_CHIPSET_STATS) && \
	(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static inline int
pld_pcie_register_qmi_ind(struct device *dev, void *cb_ctx,
			  int (*cb)(void *ctx, uint16_t type,
				    void *event, int event_len))
{
	return cnss_register_driver_async_data_cb(dev, cb_ctx, cb);
}
#else
static inline int
pld_pcie_register_qmi_ind(struct device *dev, void *cb_ctx,
			  int (*cb)(void *ctx, uint16_t type,
				    void *event, int event_len))
{
	return 0;
}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
static inline void *pld_pcie_smmu_get_domain(struct device *dev)
{
	return cnss_smmu_get_domain(dev);
}
#else
static inline void *pld_pcie_smmu_get_mapping(struct device *dev)
{
	return cnss_smmu_get_mapping(dev);
}
#endif

static inline int
pld_pcie_smmu_map(struct device *dev,
		  phys_addr_t paddr, uint32_t *iova_addr, size_t size)
{
	return cnss_smmu_map(dev, paddr, iova_addr, size);
}

#ifdef CONFIG_SMMU_S1_UNMAP
static inline int
pld_pcie_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
{
	return cnss_smmu_unmap(dev, iova_addr, size);
}
#else /* !CONFIG_SMMU_S1_UNMAP */
static inline int
pld_pcie_smmu_unmap(struct device *dev, uint32_t iova_addr, size_t size)
{
	return 0;
}
#endif /* CONFIG_SMMU_S1_UNMAP */

static inline int pld_pcie_prevent_l1(struct device *dev)
{
	return cnss_pci_prevent_l1(dev);
}

static inline void pld_pcie_allow_l1(struct device *dev)
{
	cnss_pci_allow_l1(dev);
}

#ifdef PCIE_GEN_SWITCH
/**
 * pld_pcie_set_gen_speed() - Wrapper for platform API to set PCIE gen speed
 * @dev: device
 * @pcie_gen_speed: PCIE gen speed required
 *
 * Send required PCIE Gen speed to platform driver
 *
 * Return: 0 for success, negative error code on failure
 */
static inline int pld_pcie_set_gen_speed(struct device *dev, u8 pcie_gen_speed)
{
	return cnss_set_pcie_gen_speed(dev, pcie_gen_speed);
}
#else
static inline int pld_pcie_set_gen_speed(struct device *dev, u8 pcie_gen_speed)
{
	return 0;
}
#endif
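
/*
 * Example (illustrative sketch): requesting a PCIe Gen speed switch from the
 * platform driver. The gen speed encoding is defined by the platform driver;
 * a non-zero return means the switch was not performed.
 *
 *	int ret;
 *
 *	ret = pld_pcie_set_gen_speed(dev, pcie_gen_speed);
 *	if (ret)
 *		pr_debug("PCIe gen speed switch not done, ret %d\n", ret);
 */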

static inline void pld_pcie_link_down(struct device *dev)
{
	cnss_pci_link_down(dev);
}

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) && \
	(LINUX_VERSION_CODE < KERNEL_VERSION(5, 11, 0)))
static inline int pld_pcie_get_reg_dump(struct device *dev, uint8_t *buf,
					uint32_t len)
{
	return cnss_pci_get_reg_dump(dev, buf, len);
}
#else
static inline int pld_pcie_get_reg_dump(struct device *dev, uint8_t *buf,
					uint32_t len)
{
	return 0;
}
#endif

static inline int pld_pcie_is_fw_down(struct device *dev)
{
	return cnss_pci_is_device_down(dev);
}

static inline int pld_pcie_athdiag_read(struct device *dev, uint32_t offset,
					uint32_t memtype, uint32_t datalen,
					uint8_t *output)
{
	return cnss_athdiag_read(dev, offset, memtype, datalen, output);
}

static inline int pld_pcie_athdiag_write(struct device *dev, uint32_t offset,
					 uint32_t memtype, uint32_t datalen,
					 uint8_t *input)
{
	return cnss_athdiag_write(dev, offset, memtype, datalen, input);
}

static inline void *pld_pcie_get_virt_ramdump_mem(struct device *dev,
						  unsigned long *size)
{
	return cnss_get_virt_ramdump_mem(dev, size);
}

static inline void pld_pcie_release_virt_ramdump_mem(void *address)
{
}

static inline void pld_pcie_device_crashed(struct device *dev)
{
	cnss_device_crashed(dev);
}

static inline void pld_pcie_request_pm_qos(struct device *dev, u32 qos_val)
{
	cnss_request_pm_qos(dev, qos_val);
}

static inline void pld_pcie_remove_pm_qos(struct device *dev)
{
	cnss_remove_pm_qos(dev);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
static inline void pld_pcie_set_tsf_sync_period(struct device *dev, u32 val)
{
	cnss_update_time_sync_period(dev, val);
}

static inline void pld_pcie_reset_tsf_sync_period(struct device *dev)
{
	cnss_reset_time_sync_period(dev);
}
#else
static inline void pld_pcie_set_tsf_sync_period(struct device *dev, u32 val)
{
}

static inline void pld_pcie_reset_tsf_sync_period(struct device *dev)
{
}
#endif

static inline int pld_pcie_request_bus_bandwidth(struct device *dev,
						 int bandwidth)
{
	return cnss_request_bus_bandwidth(dev, bandwidth);
}

static inline int pld_pcie_auto_suspend(struct device *dev)
{
	return cnss_auto_suspend(dev);
}

static inline int pld_pcie_auto_resume(struct device *dev)
{
	return cnss_auto_resume(dev);
}

static inline int pld_pcie_force_wake_request(struct device *dev)
{
	return cnss_pci_force_wake_request(dev);
}

static inline int pld_pcie_force_wake_request_sync(struct device *dev,
						   int timeout_us)
{
	return cnss_pci_force_wake_request_sync(dev, timeout_us);
}

static inline int pld_pcie_is_device_awake(struct device *dev)
{
	return cnss_pci_is_device_awake(dev);
}

static inline int pld_pcie_force_wake_release(struct device *dev)
{
	return cnss_pci_force_wake_release(dev);
}

static inline void pld_pcie_lock_reg_window(struct device *dev,
					    unsigned long *flags)
{
	cnss_pci_lock_reg_window(dev, flags);
}

static inline void pld_pcie_unlock_reg_window(struct device *dev,
					      unsigned long *flags)
{
	cnss_pci_unlock_reg_window(dev, flags);
}
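
/*
 * Example (illustrative sketch): bracketing direct register access with
 * force-wake and the register-window lock. do_register_access() is a
 * hypothetical caller-side helper, not part of this header.
 *
 *	unsigned long flags;
 *
 *	if (pld_pcie_force_wake_request(dev))
 *		return -EAGAIN;
 *
 *	pld_pcie_lock_reg_window(dev, &flags);
 *	do_register_access(dev);
 *	pld_pcie_unlock_reg_window(dev, &flags);
 *
 *	pld_pcie_force_wake_release(dev);
 */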

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
static inline int pld_pcie_get_pci_slot(struct device *dev)
{
	return cnss_get_pci_slot(dev);
}
#else
static inline int pld_pcie_get_pci_slot(struct device *dev)
{
	return 0;
}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static inline struct kobject *pld_pcie_get_wifi_kobj(struct device *dev)
{
	return cnss_get_wifi_kobj(dev);
}
#else
static inline struct kobject *pld_pcie_get_wifi_kobj(struct device *dev)
{
	return NULL;
}
#endif

static inline int pld_pcie_power_on(struct device *dev)
{
	return cnss_power_up(dev);
}

static inline int pld_pcie_power_off(struct device *dev)
{
	return cnss_power_down(dev);
}

static inline int pld_pcie_idle_restart(struct device *dev)
{
	return cnss_idle_restart(dev);
}

static inline int pld_pcie_idle_shutdown(struct device *dev)
{
	return cnss_idle_shutdown(dev);
}

static inline int pld_pcie_force_assert_target(struct device *dev)
{
	return cnss_force_fw_assert(dev);
}

static inline int pld_pcie_get_user_msi_assignment(struct device *dev,
						   char *user_name,
						   int *num_vectors,
						   uint32_t *user_base_data,
						   uint32_t *base_vector)
{
	return cnss_get_user_msi_assignment(dev, user_name, num_vectors,
					    user_base_data, base_vector);
}

static inline int pld_pcie_get_msi_irq(struct device *dev, unsigned int vector)
{
	return cnss_get_msi_irq(dev, vector);
}
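
/*
 * Example (illustrative sketch): resolving the IRQ of the first vector that
 * the platform assigned to an MSI user. The "CE" user name is an assumption
 * about how the caller partitions its vectors.
 *
 *	int num_vectors, irq;
 *	uint32_t user_base_data, base_vector;
 *
 *	if (pld_pcie_get_user_msi_assignment(dev, "CE", &num_vectors,
 *					     &user_base_data, &base_vector))
 *		return -EINVAL;
 *
 *	irq = pld_pcie_get_msi_irq(dev, base_vector);
 */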

#ifdef WLAN_ONE_MSI_VECTOR
static inline bool pld_pcie_is_one_msi(struct device *dev)
{
	return cnss_is_one_msi(dev);
}
#else
static inline bool pld_pcie_is_one_msi(struct device *dev)
{
	return false;
}
#endif

static inline void pld_pcie_get_msi_address(struct device *dev,
					    uint32_t *msi_addr_low,
					    uint32_t *msi_addr_high)
{
	cnss_get_msi_address(dev, msi_addr_low, msi_addr_high);
}

static inline int pld_pcie_is_drv_connected(struct device *dev)
{
	return cnss_pci_is_drv_connected(dev);
}

static inline bool pld_pcie_platform_driver_support(void)
{
	return true;
}

static inline int pld_pci_thermal_register(struct device *dev,
					   unsigned long max_state,
					   int mon_id)
{
	return cnss_thermal_cdev_register(dev, max_state, mon_id);
}

static inline void pld_pci_thermal_unregister(struct device *dev,
					      int mon_id)
{
	cnss_thermal_cdev_unregister(dev, mon_id);
}

static inline int pld_pci_get_thermal_state(struct device *dev,
					    unsigned long *thermal_state,
					    int mon_id)
{
	return cnss_get_curr_therm_cdev_state(dev, thermal_state, mon_id);
}
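
/*
 * Example (illustrative sketch): registering a cooling device, reading back
 * its current state and unregistering it. max_state and mon_id are assumed
 * caller-chosen values.
 *
 *	unsigned long state;
 *
 *	if (pld_pci_thermal_register(dev, max_state, mon_id))
 *		return -EINVAL;
 *
 *	if (!pld_pci_get_thermal_state(dev, &state, mon_id))
 *		pr_debug("current thermal state %lu\n", state);
 *
 *	pld_pci_thermal_unregister(dev, mon_id);
 */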

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
static inline bool pld_pcie_is_direct_link_supported(struct device *dev)
{
	return cnss_get_fw_cap(dev, CNSS_FW_CAP_DIRECT_LINK_SUPPORT);
}

static inline
int pld_pcie_audio_smmu_map(struct device *dev, phys_addr_t paddr,
			    dma_addr_t iova, size_t size)
{
	return cnss_audio_smmu_map(dev, paddr, iova, size);
}

static inline
void pld_pcie_audio_smmu_unmap(struct device *dev, dma_addr_t iova, size_t size)
{
	cnss_audio_smmu_unmap(dev, iova, size);
}
#else
static inline bool pld_pcie_is_direct_link_supported(struct device *dev)
{
	return false;
}

static inline
int pld_pcie_audio_smmu_map(struct device *dev, phys_addr_t paddr,
			    dma_addr_t iova, size_t size)
{
	return 0;
}

static inline
void pld_pcie_audio_smmu_unmap(struct device *dev, dma_addr_t iova, size_t size)
{
}
#endif
#endif
#endif