/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_nbuf_frag.h
 * This file provides OS-dependent nbuf frag APIs.
 */

#ifndef _I_QDF_NBUF_FRAG_H
#define _I_QDF_NBUF_FRAG_H

#include <qdf_net_types.h>
#include <qdf_mem.h>

#define QDF_NBUF_FRAG_DEBUG_COUNT_ZERO 0
#define QDF_NBUF_FRAG_DEBUG_COUNT_ONE 1

/**
 * typedef __qdf_frag_cache_t - Abstraction for struct page_frag_cache,
 *                              the page frag allocation cache
 */
typedef struct page_frag_cache __qdf_frag_cache_t;

/**
 * typedef __qdf_frag_t - Abstraction for a frag address (void *)
 */
typedef void *__qdf_frag_t;

#ifdef QDF_NBUF_FRAG_GLOBAL_COUNT

/**
 * __qdf_frag_count_get() - Get global frag count
 *
 * Return: Global frag gauge
 */
uint32_t __qdf_frag_count_get(void);

/**
 * __qdf_frag_count_inc() - Increment frag global count
 * @value: Increment value
 *
 * Return: none
 */
void __qdf_frag_count_inc(uint32_t value);

/**
 * __qdf_frag_count_dec() - Decrement frag global count
 * @value: Decrement value
 *
 * Return: none
 */
void __qdf_frag_count_dec(uint32_t value);

/**
 * __qdf_frag_mod_init() - Initialization routine for qdf_frag
 *
 * Return: none
 */
void __qdf_frag_mod_init(void);

/**
 * __qdf_frag_mod_exit() - Uninitialization routine for qdf_frag
 *
 * Return: none
 */
void __qdf_frag_mod_exit(void);

#else
static inline uint32_t __qdf_frag_count_get(void)
{
	return 0;
}

static inline void __qdf_frag_count_inc(uint32_t value)
{
}

static inline void __qdf_frag_count_dec(uint32_t value)
{
}

static inline void __qdf_frag_mod_init(void)
{
}

static inline void __qdf_frag_mod_exit(void)
{
}
#endif /* QDF_NBUF_FRAG_GLOBAL_COUNT */
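
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * the global gauge can be sampled from a stats/debug path to report how
 * many nbuf frags are currently outstanding. When QDF_NBUF_FRAG_GLOBAL_COUNT
 * is not defined, the stubs above reduce this to a constant 0 with no
 * tracking overhead.
 *
 *	void my_driver_dump_frag_stats(void)
 *	{
 *		uint32_t outstanding = __qdf_frag_count_get();
 *
 *		pr_info("outstanding nbuf frags: %u\n", outstanding);
 *	}
 */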

/*
 * Maximum number of frags an SKB can hold
 */
#define __QDF_NBUF_MAX_FRAGS MAX_SKB_FRAGS

/**
 * __qdf_mem_unmap_page() - Unmap frag memory
 * @osdev: qdf_device_t
 * @paddr: Address to be unmapped
 * @nbytes: Number of bytes to be unmapped
 * @dir: qdf_dma_dir_t
 *
 * Return: none
 */
void __qdf_mem_unmap_page(qdf_device_t osdev, qdf_dma_addr_t paddr,
			  size_t nbytes, qdf_dma_dir_t dir);

/**
 * __qdf_mem_map_page() - Map frag memory
 * @osdev: qdf_device_t
 * @buf: Vaddr to be mapped
 * @dir: qdf_dma_dir_t
 * @nbytes: Number of bytes to be mapped
 * @phy_addr: Mapped physical address
 *
 * Return: QDF_STATUS
 */
QDF_STATUS __qdf_mem_map_page(qdf_device_t osdev, __qdf_frag_t buf,
			      qdf_dma_dir_t dir, size_t nbytes,
			      qdf_dma_addr_t *phy_addr);
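
/*
 * Illustrative DMA usage sketch (hypothetical caller; osdev, p_frag and
 * RX_BUF_SIZE are assumed to come from the caller's context): a frag mapped
 * for the device with __qdf_mem_map_page() must later be unmapped with the
 * same length and direction via __qdf_mem_unmap_page().
 *
 *	qdf_dma_addr_t paddr;
 *	QDF_STATUS status;
 *
 *	status = __qdf_mem_map_page(osdev, p_frag, QDF_DMA_FROM_DEVICE,
 *				    RX_BUF_SIZE, &paddr);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		return status;
 *
 *	// ... hand paddr to the hardware, wait for completion ...
 *
 *	__qdf_mem_unmap_page(osdev, paddr, RX_BUF_SIZE, QDF_DMA_FROM_DEVICE);
 */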

/**
 * __qdf_frag_cache_drain() - Drain page frag cache
 * @pf_cache: page frag cache
 *
 * Return: void
 */
void __qdf_frag_cache_drain(__qdf_frag_cache_t *pf_cache);

/**
 * __qdf_frag_free() - Free allocated frag memory
 * @vaddr: Frag address to be freed
 *
 * Return: none
 */
static inline void __qdf_frag_free(__qdf_frag_t vaddr)
{
	if (qdf_likely(vaddr)) {
		skb_free_frag(vaddr);
		__qdf_frag_count_dec(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);
	}
}

/**
 * __qdf_frag_alloc() - Allocate frag memory
 * @pf_cache: page frag cache
 * @fragsz: Size of frag memory to be allocated
 *
 * Return: Allocated frag addr.
 */
#if defined(QDF_FRAG_CACHE_SUPPORT)
static inline __qdf_frag_t __qdf_frag_alloc(__qdf_frag_cache_t *pf_cache,
					    unsigned int fragsz)
{
	__qdf_frag_t p_frag;

	if (pf_cache) {
		unsigned int sz = SKB_DATA_ALIGN(fragsz);

		p_frag = page_frag_alloc(pf_cache, sz, GFP_ATOMIC);
	} else {
		p_frag = netdev_alloc_frag(fragsz);
	}

	if (p_frag)
		__qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);

	return p_frag;
}
#else
static inline __qdf_frag_t __qdf_frag_alloc(__qdf_frag_cache_t *pf_cache,
					    unsigned int fragsz)
{
	__qdf_frag_t p_frag = netdev_alloc_frag(fragsz);

	if (p_frag)
		__qdf_frag_count_inc(QDF_NBUF_FRAG_DEBUG_COUNT_ONE);

	return p_frag;
}
#endif
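
/*
 * Illustrative allocation sketch (hypothetical caller, not part of this
 * header): frags drawn from a caller-owned page frag cache are freed
 * individually with __qdf_frag_free(); the cache itself is released with
 * __qdf_frag_cache_drain() once no frag allocated from it is still in use.
 * Passing a NULL pf_cache (or building without QDF_FRAG_CACHE_SUPPORT)
 * falls back to netdev_alloc_frag().
 *
 *	__qdf_frag_cache_t pf_cache = { 0 };
 *	__qdf_frag_t frag;
 *
 *	frag = __qdf_frag_alloc(&pf_cache, 2048);
 *	if (!frag)
 *		return QDF_STATUS_E_NOMEM;
 *
 *	// ... fill the frag and attach it to an nbuf ...
 *
 *	__qdf_frag_free(frag);
 *	__qdf_frag_cache_drain(&pf_cache);
 */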

#endif /* _I_QDF_NBUF_FRAG_H */