/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_mem.h
 * Linux-specific definitions for QDF memory APIs
 */

#ifndef __I_QDF_MEM_H
#define __I_QDF_MEM_H

#ifdef __KERNEL__
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 17)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
#include <linux/autoconf.h>
#else
#include <generated/autoconf.h>
#endif
#endif
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/vmalloc.h>
#include <linux/pci.h> /* pci_alloc_consistent */
#include <linux/cache.h> /* L1_CACHE_BYTES */

#define __qdf_cache_line_sz L1_CACHE_BYTES
#include "queue.h"

#else
/*
 * Provide dummy defs for kernel data types, functions, and enums
 * used in this header file.
 */
#define GFP_KERNEL 0
#define GFP_ATOMIC 0
#define __GFP_KSWAPD_RECLAIM 0
#define __GFP_DIRECT_RECLAIM 0
#define kzalloc(size, flags) NULL
#define vmalloc(size) NULL
#define kfree(buf)
#define vfree(buf)
#define pci_alloc_consistent(dev, size, paddr) NULL
#define __qdf_mempool_t void*
#define QDF_RET_IP NULL
#endif /* __KERNEL__ */
#include <qdf_status.h>
#if (defined(__ANDROID_COMMON_KERNEL__) && \
	(LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0)) && \
	(defined(MSM_PLATFORM) || defined(QCA_IPA_LL_TX_FLOW_CONTROL)))
#include <linux/qcom-iommu-util.h>
#endif

#if IS_ENABLED(CONFIG_ARM_SMMU)
#include <pld_common.h>
#ifdef ENABLE_SMMU_S1_TRANSLATION
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
#include <asm/dma-iommu.h>
#endif
#endif
#include <linux/iommu.h>
#endif

#ifdef __KERNEL__
typedef struct mempool_elem {
	STAILQ_ENTRY(mempool_elem) mempool_entry;
} mempool_elem_t;

/**
 * typedef __qdf_mempool_ctxt_t - Memory pool context
 * @pool_id: pool identifier
 * @flags: flags
 * @elem_size: size of each pool element in bytes
 * @pool_mem: address of the pool memory created
 * @mem_size: total size of the pool in bytes
 * @free_list: free pool list
 * @lock: spinlock object
 * @max_elem: maximum number of elements in the pool
 * @free_cnt: number of free elements available
 */
typedef struct __qdf_mempool_ctxt {
	int pool_id;
	u_int32_t flags;
	size_t elem_size;
	void *pool_mem;
	u_int32_t mem_size;

	STAILQ_HEAD(, mempool_elem) free_list;
	spinlock_t lock;
	u_int32_t max_elem;
	u_int32_t free_cnt;
} __qdf_mempool_ctxt_t;

typedef struct kmem_cache *qdf_kmem_cache_t;
#endif /* __KERNEL__ */

#define __page_size ((size_t)PAGE_SIZE)
#define __qdf_align(a, mask) ALIGN(a, mask)

#ifdef DISABLE_MEMDEBUG_PANIC
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	do { \
		/* no-op */ \
	} while (false)
#else
#define QDF_MEMDEBUG_PANIC(reason_fmt, args...) \
	QDF_DEBUG_PANIC(reason_fmt, ## args)
#endif

/**
 * typedef __dma_data_direction - typedef for dma_data_direction
 */
typedef enum dma_data_direction __dma_data_direction;

/**
 * __qdf_dma_dir_to_os() - Convert DMA data direction to OS-specific enum
 * @qdf_dir: QDF DMA data direction
 *
 * Return: enum dma_data_direction
 */
static inline
enum dma_data_direction __qdf_dma_dir_to_os(qdf_dma_dir_t qdf_dir)
{
	switch (qdf_dir) {
	case QDF_DMA_BIDIRECTIONAL:
		return DMA_BIDIRECTIONAL;
	case QDF_DMA_TO_DEVICE:
		return DMA_TO_DEVICE;
	case QDF_DMA_FROM_DEVICE:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}

/**
 * __qdf_mem_map_nbytes_single() - Map memory for DMA
 * @osdev: pointer to OS device context
 * @buf: pointer to memory to be DMA mapped
 * @dir: DMA map direction
 * @nbytes: number of bytes to be mapped
 * @phy_addr: pointer to receive the physical address
 *
 * Return: success/failure
 */
static inline uint32_t __qdf_mem_map_nbytes_single(qdf_device_t osdev,
						   void *buf, qdf_dma_dir_t dir,
						   int nbytes,
						   qdf_dma_addr_t *phy_addr)
{
	/* assume that the OS only provides a single fragment */
	*phy_addr = dma_map_single(osdev->dev, buf, nbytes,
				   __qdf_dma_dir_to_os(dir));
	return dma_mapping_error(osdev->dev, *phy_addr) ?
		QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_cache_sync(osdev->dev, buf, nbytes, __qdf_dma_dir_to_os(dir));
}
#else
static inline void __qdf_mem_dma_cache_sync(qdf_device_t osdev,
					    qdf_dma_addr_t buf,
					    qdf_dma_dir_t dir,
					    int nbytes)
{
	dma_sync_single_for_cpu(osdev->dev, buf, nbytes,
				__qdf_dma_dir_to_os(dir));
}
#endif

/**
 * __qdf_mem_unmap_nbytes_single() - Unmap memory for DMA
 * @osdev: pointer to OS device context
 * @phy_addr: physical address of memory to be DMA unmapped
 * @dir: DMA unmap direction
 * @nbytes: number of bytes to be unmapped
 *
 * Return: none
 */
static inline void __qdf_mem_unmap_nbytes_single(qdf_device_t osdev,
						 qdf_dma_addr_t phy_addr,
						 qdf_dma_dir_t dir, int nbytes)
{
	dma_unmap_single(osdev->dev, phy_addr, nbytes,
			 __qdf_dma_dir_to_os(dir));
}
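
/*
 * Illustrative sketch (not part of this header): a typical map/sync/unmap
 * sequence built from the helpers above. The osdev, buf, and len names are
 * hypothetical placeholders for the caller's context.
 *
 *	qdf_dma_addr_t paddr;
 *
 *	if (__qdf_mem_map_nbytes_single(osdev, buf, QDF_DMA_FROM_DEVICE,
 *					len, &paddr) != QDF_STATUS_SUCCESS)
 *		return;		// mapping failed; buffer unusable for DMA
 *
 *	// ... device writes into the buffer ...
 *
 *	// make the device's writes visible to the CPU before reading
 *	__qdf_mem_dma_cache_sync(osdev, paddr, QDF_DMA_FROM_DEVICE, len);
 *
 *	__qdf_mem_unmap_nbytes_single(osdev, paddr, QDF_DMA_FROM_DEVICE, len);
 */
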
#ifdef __KERNEL__

typedef __qdf_mempool_ctxt_t *__qdf_mempool_t;

/**
 * __qdf_mempool_init() - Create and initialize a memory pool
 * @osdev: platform device object
 * @pool_addr: address at which the handle to the created pool is stored
 * @elem_cnt: number of elements in the pool
 * @elem_size: size of each pool element in bytes
 * @flags: flags
 *
 * Return: 0 on success, or a negative error code if allocation failed
 */
int __qdf_mempool_init(qdf_device_t osdev, __qdf_mempool_t *pool_addr,
		       int elem_cnt, size_t elem_size, u_int32_t flags);

/**
 * __qdf_mempool_destroy() - Destroy a memory pool
 * @osdev: platform device object
 * @pool: memory pool
 *
 * Return: none
 */
void __qdf_mempool_destroy(qdf_device_t osdev, __qdf_mempool_t pool);

/**
 * __qdf_mempool_alloc() - Allocate an element from the memory pool
 * @osdev: platform device object
 * @pool: handle to the memory pool
 *
 * Return: Pointer to the allocated element or NULL if the pool is empty
 */
void *__qdf_mempool_alloc(qdf_device_t osdev, __qdf_mempool_t pool);

/**
 * __qdf_mempool_free() - Free a memory pool element
 * @osdev: platform device object
 * @pool: handle to the memory pool
 * @buf: element to be freed
 *
 * Return: none
 */
void __qdf_mempool_free(qdf_device_t osdev, __qdf_mempool_t pool, void *buf);
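
/*
 * Illustrative sketch (assumption, not mandated by this header): the typical
 * pool lifecycle. The osdev handle, element count, and struct foo element
 * type are hypothetical.
 *
 *	__qdf_mempool_t pool;
 *	void *elem;
 *
 *	if (__qdf_mempool_init(osdev, &pool, 64, sizeof(struct foo), 0))
 *		return;		// pool creation failed
 *
 *	elem = __qdf_mempool_alloc(osdev, pool);
 *	if (elem) {
 *		// ... use the fixed-size element ...
 *		__qdf_mempool_free(osdev, pool, elem);
 *	}
 *
 *	__qdf_mempool_destroy(osdev, pool);
 */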

/**
 * __qdf_kmem_cache_create() - OS abstraction for cache creation
 * @cache_name: cache name
 * @size: size of the objects the cache will hold
 *
 * Return: Cache address on successful creation, else NULL
 */
qdf_kmem_cache_t __qdf_kmem_cache_create(const char *cache_name,
					 qdf_size_t size);

/**
 * __qdf_kmem_cache_destroy() - OS abstraction for cache destruction
 * @cache: cache pointer
 *
 * Return: void
 */
void __qdf_kmem_cache_destroy(qdf_kmem_cache_t cache);

/**
 * __qdf_kmem_cache_alloc() - Function to allocate an object from a cache
 * @cache: cache address
 *
 * Return: Object from the cache, or NULL on failure
 */
void *__qdf_kmem_cache_alloc(qdf_kmem_cache_t cache);

/**
 * __qdf_kmem_cache_free() - Function to free a cache object
 * @cache: cache address
 * @node: object to be returned to the cache
 *
 * Return: void
 */
void __qdf_kmem_cache_free(qdf_kmem_cache_t cache, void *node);
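
/*
 * Illustrative sketch (assumption): the usual create/alloc/free/destroy
 * cycle for the kmem cache wrappers above. The cache name and struct foo
 * object type are hypothetical.
 *
 *	qdf_kmem_cache_t cache;
 *	void *obj;
 *
 *	cache = __qdf_kmem_cache_create("my_cache", sizeof(struct foo));
 *	if (!cache)
 *		return;
 *
 *	obj = __qdf_kmem_cache_alloc(cache);
 *	if (obj) {
 *		// ... use obj ...
 *		__qdf_kmem_cache_free(cache, obj);
 *	}
 *
 *	__qdf_kmem_cache_destroy(cache);
 */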

#define QDF_RET_IP ((void *)_RET_IP_)

#define __qdf_mempool_elem_size(_pool) ((_pool)->elem_size)
#endif

/**
 * __qdf_ioremap() - map bus memory into cpu space
 * @HOST_CE_ADDRESS: bus address of the memory
 * @HOST_CE_SIZE: memory size to map
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0))
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
		ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE)
#else
#define __qdf_ioremap(HOST_CE_ADDRESS, HOST_CE_SIZE) \
		ioremap_nocache(HOST_CE_ADDRESS, HOST_CE_SIZE)
#endif

/**
 * __qdf_mem_smmu_s1_enabled() - Return SMMU stage 1 translation enable status
 * @osdev: parent device instance
 *
 * Return: true if SMMU stage 1 is enabled, false if it is bypassed
 */
static inline bool __qdf_mem_smmu_s1_enabled(qdf_device_t osdev)
{
	return osdev->smmu_s1_enabled;
}

#if IS_ENABLED(CONFIG_ARM_SMMU) && defined(ENABLE_SMMU_S1_TRANSLATION)
/**
 * typedef __qdf_iommu_domain_t - abstraction for struct iommu_domain
 */
typedef struct iommu_domain __qdf_iommu_domain_t;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
#if IS_ENABLED(CONFIG_QCOM_IOMMU_UTIL)
/**
 * __qdf_iommu_attr_to_os() - Convert a QDF iommu attribute to the OS mapping
 * configuration bitmap
 * @attr: QDF iommu attribute
 *
 * Return: IOMMU mapping configuration bitmap
 */
static inline int __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
	switch (attr) {
	case QDF_DOMAIN_ATTR_S1_BYPASS:
		return QCOM_IOMMU_MAPPING_CONF_S1_BYPASS;
	case QDF_DOMAIN_ATTR_ATOMIC:
		return QCOM_IOMMU_MAPPING_CONF_ATOMIC;
	case QDF_DOMAIN_ATTR_FAST:
		return QCOM_IOMMU_MAPPING_CONF_FAST;
	default:
		return -EINVAL;
	}
}

/**
 * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: 0 for success, and negative values otherwise
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	int mapping_config;
	int mapping_bitmap;
	int *value;

	mapping_bitmap = __qdf_iommu_attr_to_os(attr);
	if (mapping_bitmap < 0)
		return -EINVAL;

	mapping_config = qcom_iommu_get_mappings_configuration(domain);
	if (mapping_config < 0)
		return -EINVAL;

	value = data;
	*value = (mapping_config & mapping_bitmap) ? 1 : 0;

	return 0;
}
#else /* !CONFIG_QCOM_IOMMU_UTIL */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return -ENOTSUPP;
}
#endif /* CONFIG_QCOM_IOMMU_UTIL */
#else
/**
 * __qdf_iommu_attr_to_os() - Convert a QDF iommu attribute to the OS-specific
 * enum
 * @attr: QDF iommu attribute
 *
 * Return: enum iommu_attr
 */
static inline
enum iommu_attr __qdf_iommu_attr_to_os(enum qdf_iommu_attr attr)
{
	switch (attr) {
	case QDF_DOMAIN_ATTR_GEOMETRY:
		return DOMAIN_ATTR_GEOMETRY;
	case QDF_DOMAIN_ATTR_PAGING:
		return DOMAIN_ATTR_PAGING;
	case QDF_DOMAIN_ATTR_WINDOWS:
		return DOMAIN_ATTR_WINDOWS;
	case QDF_DOMAIN_ATTR_FSL_PAMU_STASH:
		return DOMAIN_ATTR_FSL_PAMU_STASH;
	case QDF_DOMAIN_ATTR_FSL_PAMU_ENABLE:
		return DOMAIN_ATTR_FSL_PAMU_ENABLE;
	case QDF_DOMAIN_ATTR_FSL_PAMUV1:
		return DOMAIN_ATTR_FSL_PAMUV1;
	case QDF_DOMAIN_ATTR_NESTING:
		return DOMAIN_ATTR_NESTING;
	case QDF_DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		return DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE;
	case QDF_DOMAIN_ATTR_CONTEXT_BANK:
		return DOMAIN_ATTR_CONTEXT_BANK;
	case QDF_DOMAIN_ATTR_NON_FATAL_FAULTS:
		return DOMAIN_ATTR_NON_FATAL_FAULTS;
	case QDF_DOMAIN_ATTR_S1_BYPASS:
		return DOMAIN_ATTR_S1_BYPASS;
	case QDF_DOMAIN_ATTR_ATOMIC:
		return DOMAIN_ATTR_ATOMIC;
	case QDF_DOMAIN_ATTR_SECURE_VMID:
		return DOMAIN_ATTR_SECURE_VMID;
	case QDF_DOMAIN_ATTR_FAST:
		return DOMAIN_ATTR_FAST;
	case QDF_DOMAIN_ATTR_PGTBL_INFO:
		return DOMAIN_ATTR_PGTBL_INFO;
	case QDF_DOMAIN_ATTR_USE_UPSTREAM_HINT:
		return DOMAIN_ATTR_USE_UPSTREAM_HINT;
	case QDF_DOMAIN_ATTR_EARLY_MAP:
		return DOMAIN_ATTR_EARLY_MAP;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT;
	case QDF_DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT:
		return DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT;
	case QDF_DOMAIN_ATTR_USE_LLC_NWA:
		return DOMAIN_ATTR_USE_LLC_NWA;
	case QDF_DOMAIN_ATTR_SPLIT_TABLES:
		return DOMAIN_ATTR_SPLIT_TABLES;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_CFRE:
		return DOMAIN_ATTR_FAULT_MODEL_NO_CFRE;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_NO_STALL:
		return DOMAIN_ATTR_FAULT_MODEL_NO_STALL;
	case QDF_DOMAIN_ATTR_FAULT_MODEL_HUPCF:
		return DOMAIN_ATTR_FAULT_MODEL_HUPCF;
	default:
		return DOMAIN_ATTR_EXTENDED_MAX;
	}
}

/**
 * __qdf_iommu_domain_get_attr() - API to get iommu domain attributes
 * @domain: iommu domain
 * @attr: iommu attribute
 * @data: data pointer
 *
 * Return: 0 for success, and negative values otherwise
 */
static inline int
__qdf_iommu_domain_get_attr(__qdf_iommu_domain_t *domain,
			    enum qdf_iommu_attr attr, void *data)
{
	return iommu_domain_get_attr(domain, __qdf_iommu_attr_to_os(attr),
				     data);
}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
/**
 * __qdf_dev_get_domain() - get iommu domain from osdev
 * @osdev: parent device instance
 *
 * Return: iommu domain
 */
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	return osdev->domain;
}
#else
static inline struct iommu_domain *
__qdf_dev_get_domain(qdf_device_t osdev)
{
	if (osdev->iommu_mapping)
		return osdev->iommu_mapping->domain;

	return NULL;
}
#endif
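
/*
 * Illustrative sketch (assumption): querying whether stage 1 translation is
 * bypassed for a device's iommu domain, using the two helpers above. Both
 * kernel branches store an int result through @data, so the call pattern is
 * the same on either side of the version split.
 *
 *	__qdf_iommu_domain_t *domain = __qdf_dev_get_domain(osdev);
 *	int s1_bypass = 0;
 *
 *	if (domain &&
 *	    !__qdf_iommu_domain_get_attr(domain, QDF_DOMAIN_ATTR_S1_BYPASS,
 *					 &s1_bypass) && s1_bypass)
 *		;	// stage 1 bypassed: DMA addresses are physical
 */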

/**
 * __qdf_mem_paddr_from_dmaaddr() - get the actual physical address from a
 * dma_addr
 * @osdev: parent device instance
 * @dma_addr: dma_addr
 *
 * Get the actual physical address from dma_addr based on SMMU enablement
 * status. If SMMU stage 1 translation is enabled, the DMA APIs return an IO
 * virtual address (IOVA); otherwise they return a physical address. In the
 * IOVA case, look up the physical address from the SMMU mapping.
 *
 * Return: DMA-able physical address
 */
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	struct iommu_domain *domain;

	if (__qdf_mem_smmu_s1_enabled(osdev)) {
		domain = __qdf_dev_get_domain(osdev);
		if (domain)
			return iommu_iova_to_phys(domain, dma_addr);
	}

	return dma_addr;
}
#else
static inline unsigned long
__qdf_mem_paddr_from_dmaaddr(qdf_device_t osdev,
			     qdf_dma_addr_t dma_addr)
{
	return dma_addr;
}
#endif
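
/*
 * Illustrative sketch (assumption): recovering the true physical address of
 * a mapped buffer, e.g. before handing its location to an entity that
 * bypasses the SMMU. The dma_addr value is a hypothetical handle returned by
 * one of the map/alloc helpers in this file; without SMMU S1 it is already
 * the physical address and is returned unchanged.
 *
 *	unsigned long pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
 */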

/**
 * __qdf_os_mem_dma_get_sgtable() - Fill a scatter-gather table for DMA memory
 * @dev: device instance
 * @sgt: scatter gather table pointer
 * @cpu_addr: HLOS virtual address
 * @dma_addr: dma/iova
 * @size: allocated memory size
 *
 * Return: 0 on success, or a negative error code otherwise
 */
static inline int
__qdf_os_mem_dma_get_sgtable(struct device *dev, void *sgt, void *cpu_addr,
			     qdf_dma_addr_t dma_addr, size_t size)
{
	return dma_get_sgtable(dev, (struct sg_table *)sgt, cpu_addr, dma_addr,
			       size);
}

/**
 * __qdf_os_mem_free_sgtable() - Free a previously allocated sg table
 * @sgt: the mapped sg table header
 *
 * Return: None
 */
static inline void
__qdf_os_mem_free_sgtable(struct sg_table *sgt)
{
	sg_free_table(sgt);
}

/**
 * __qdf_dma_get_sgtable_dma_addr() - Assign DMA addresses to scatterlist
 * elements
 * @sgt: scatter gather table pointer
 *
 * Return: None
 */
static inline void
__qdf_dma_get_sgtable_dma_addr(struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if (!sg)
			break;

		sg->dma_address = sg_phys(sg);
	}
}
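
/*
 * Illustrative sketch (assumption): exporting a DMA allocation as a
 * scatter-gather table, then populating per-entry DMA addresses. The vaddr,
 * dma_addr, and size values are hypothetical results of a prior consistent
 * allocation.
 *
 *	struct sg_table sgt;
 *
 *	if (__qdf_os_mem_dma_get_sgtable(osdev->dev, &sgt, vaddr,
 *					 dma_addr, size))
 *		return;
 *
 *	__qdf_dma_get_sgtable_dma_addr(&sgt);
 *	// ... hand sgt to the consumer (e.g. an SMMU mapping routine) ...
 *	__qdf_os_mem_free_sgtable(&sgt);
 */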

/**
 * __qdf_mem_get_dma_addr() - Return the DMA address based on SMMU translation
 * status
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 *
 * Based on the SMMU stage 1 translation enablement status, return the
 * corresponding DMA address from qdf_mem_info_t: the IO virtual address if
 * stage 1 translation is enabled, the physical address otherwise.
 *
 * Return: dma address
 */
static inline qdf_dma_addr_t __qdf_mem_get_dma_addr(qdf_device_t osdev,
						    qdf_mem_info_t *mem_info)
{
	if (__qdf_mem_smmu_s1_enabled(osdev))
		return (qdf_dma_addr_t)mem_info->iova;
	else
		return (qdf_dma_addr_t)mem_info->pa;
}

/**
 * __qdf_mem_get_dma_addr_ptr() - Return the DMA address storage pointer
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 *
 * Based on the SMMU stage 1 translation enablement status, return a pointer
 * to the corresponding DMA address field of qdf_mem_info_t: the IO virtual
 * address if stage 1 translation is enabled, the physical address otherwise.
 *
 * Return: dma address storage pointer
 */
static inline qdf_dma_addr_t *
__qdf_mem_get_dma_addr_ptr(qdf_device_t osdev,
			   qdf_mem_info_t *mem_info)
{
	if (__qdf_mem_smmu_s1_enabled(osdev))
		return (qdf_dma_addr_t *)(&mem_info->iova);
	else
		return (qdf_dma_addr_t *)(&mem_info->pa);
}

/**
 * __qdf_update_mem_map_table() - Update DMA memory map info
 * @osdev: parent device instance
 * @mem_info: pointer to shared memory information
 * @dma_addr: dma address
 * @mem_size: memory size allocated
 *
 * Store DMA shared memory information
 *
 * Return: none
 */
static inline void __qdf_update_mem_map_table(qdf_device_t osdev,
					      qdf_mem_info_t *mem_info,
					      qdf_dma_addr_t dma_addr,
					      uint32_t mem_size)
{
	mem_info->pa = __qdf_mem_paddr_from_dmaaddr(osdev, dma_addr);
	mem_info->iova = dma_addr;
	mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_size() - Return DMA memory size
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 *
 * Return: DMA memory size
 */
static inline uint32_t
__qdf_mem_get_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info)
{
	return mem_info->size;
}

/**
 * __qdf_mem_set_dma_size() - Set DMA memory size
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 * @mem_size: memory size allocated
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_size(qdf_device_t osdev,
		       qdf_mem_info_t *mem_info,
		       uint32_t mem_size)
{
	mem_info->size = mem_size;
}

/**
 * __qdf_mem_get_dma_pa() - Return DMA physical address
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 *
 * Return: DMA physical address
 */
static inline qdf_dma_addr_t
__qdf_mem_get_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info)
{
	return mem_info->pa;
}

/**
 * __qdf_mem_set_dma_pa() - Set DMA physical address
 * @osdev: parent device instance
 * @mem_info: pointer to allocated memory information
 * @dma_pa: DMA physical address
 *
 * Return: none
 */
static inline void
__qdf_mem_set_dma_pa(qdf_device_t osdev,
		     qdf_mem_info_t *mem_info,
		     qdf_dma_addr_t dma_pa)
{
	mem_info->pa = dma_pa;
}
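
/*
 * Illustrative sketch (assumption): recording a fresh DMA allocation in a
 * qdf_mem_info_t and reading it back through the accessors above. The
 * dma_addr and size values are hypothetical results of a prior allocation.
 *
 *	qdf_mem_info_t mem_info;
 *	qdf_dma_addr_t dev_addr, pa;
 *
 *	__qdf_update_mem_map_table(osdev, &mem_info, dma_addr, size);
 *
 *	// device-visible address: IOVA when SMMU S1 is on, PA otherwise
 *	dev_addr = __qdf_mem_get_dma_addr(osdev, &mem_info);
 *	// true physical address, regardless of SMMU state
 *	pa = __qdf_mem_get_dma_pa(osdev, &mem_info);
 */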

/**
 * __qdf_mem_alloc_consistent() - allocate consistent (DMA-able) qdf memory
 * @osdev: OS device handle
 * @dev: pointer to device handle
 * @size: size to be allocated
 * @paddr: physical address
 * @func: function name of the call site
 * @line: line number of the call site
 *
 * Return: pointer to the allocated memory, or NULL if the allocation fails
 */
void *__qdf_mem_alloc_consistent(qdf_device_t osdev, void *dev,
				 qdf_size_t size, qdf_dma_addr_t *paddr,
				 const char *func, uint32_t line);

/**
 * __qdf_mem_malloc() - allocates QDF memory
 * @size: number of bytes of memory to allocate
 * @func: function name of the call site
 * @line: line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_mem_malloc(qdf_size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_free() - free QDF memory
 * @ptr: pointer to the starting address of the memory to be freed
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_mem_free(void *ptr);
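
/*
 * Illustrative sketch (assumption): callers normally supply __func__ and
 * __LINE__ so the memory debug framework can attribute the allocation to
 * its call site; the len name is a hypothetical placeholder.
 *
 *	void *buf = __qdf_mem_malloc(len, __func__, __LINE__);
 *
 *	if (!buf)
 *		return;
 *	// ... use buf ...
 *	__qdf_mem_free(buf);
 */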

/**
 * __qdf_mem_valloc() - QDF virtual memory allocation API
 * @size: number of bytes of virtual memory to allocate
 * @func: caller function name
 * @line: line number
 *
 * Return: A valid memory location on success, or NULL on failure
 */
void *__qdf_mem_valloc(size_t size, const char *func, uint32_t line);

/**
 * __qdf_mem_vfree() - QDF API to free virtual memory
 * @ptr: pointer to the virtual memory to free
 *
 * Return: None
 */
void __qdf_mem_vfree(void *ptr);

/**
 * __qdf_mem_virt_to_phys() - Convert a virtual address to physical
 * @vaddr: virtual address
 *
 * Return: physical address
 */
#define __qdf_mem_virt_to_phys(vaddr) virt_to_phys(vaddr)

#ifdef QCA_WIFI_MODULE_PARAMS_FROM_INI
/**
 * __qdf_untracked_mem_malloc() - allocates non-QDF memory
 * @size: number of bytes of memory to allocate
 * @func: function name of the call site
 * @line: line number of the call site
 *
 * This function will dynamically allocate the specified number of bytes of
 * memory. Memory allocated this way is not tracked by the qdf memory debug
 * framework.
 *
 * Return:
 * Upon successful allocation, returns a non-NULL pointer to the allocated
 * memory. If this function is unable to allocate the amount of memory
 * specified (for any reason) it returns NULL.
 */
void *__qdf_untracked_mem_malloc(qdf_size_t size, const char *func,
				 uint32_t line);

/**
 * __qdf_untracked_mem_free() - free non-QDF memory
 * @ptr: pointer to the starting address of the memory to be freed
 *
 * This function will free the memory pointed to by 'ptr'.
 *
 * Return: None
 */
void __qdf_untracked_mem_free(void *ptr);
#endif

/**
 * __qdf_mem_free_consistent() - free consistent qdf memory
 * @osdev: OS device handle
 * @dev: pointer to device handle
 * @size: size of the memory region being freed
 * @vaddr: virtual address
 * @paddr: physical address
 * @memctx: pointer to DMA context
 *
 * Return: none
 */
void __qdf_mem_free_consistent(qdf_device_t osdev, void *dev,
			       qdf_size_t size, void *vaddr,
			       qdf_dma_addr_t paddr, qdf_dma_context_t memctx);
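
/*
 * Illustrative sketch (assumption): pairing the consistent alloc/free APIs
 * declared above. The size value and the memctx argument of 0 are
 * hypothetical; @dev is typically osdev->dev.
 *
 *	qdf_dma_addr_t paddr;
 *	void *vaddr;
 *
 *	vaddr = __qdf_mem_alloc_consistent(osdev, osdev->dev, size, &paddr,
 *					   __func__, __LINE__);
 *	if (!vaddr)
 *		return;
 *	// ... share the buffer with the device via paddr ...
 *	__qdf_mem_free_consistent(osdev, osdev->dev, size, vaddr, paddr, 0);
 */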

#endif /* __I_QDF_MEM_H */