/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_util.h
 * This file provides OS-dependent APIs.
 */

#ifndef _I_QDF_UTIL_H
#define _I_QDF_UTIL_H

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/average.h>

#include <linux/random.h>
#include <linux/io.h>

#include <qdf_types.h>
#include <asm/byteorder.h>

#if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 3, 8)
#include <asm/system.h>
#else
#if defined(__LINUX_MIPS32_ARCH__) || defined(__LINUX_MIPS64_ARCH__)
#include <asm/dec/system.h>
#else
#endif
#endif

#include <qdf_types.h>
#include <linux/io.h>
#include <asm/byteorder.h>

#ifdef QCA_PARTNER_PLATFORM
#include "ath_carr_pltfrm.h"
#else
#include <linux/byteorder/generic.h>
#endif

#include <linux/rcupdate.h>

typedef wait_queue_head_t __qdf_wait_queue_head_t;

/* Generic compiler-dependent macros if defined by the OS */
#define __qdf_wait_queue_interruptible(wait_queue, condition) \
	wait_event_interruptible(wait_queue, condition)

#define __qdf_wait_queue_timeout(wait_queue, condition, timeout) \
	wait_event_timeout(wait_queue, condition, timeout)

#define __qdf_init_waitqueue_head(_q) init_waitqueue_head(_q)

#define __qdf_wake_up_interruptible(_q) wake_up_interruptible(_q)

#define __qdf_wake_up(_q) wake_up(_q)

#define __qdf_wake_up_completion(_q) wake_up_completion(_q)

#define __qdf_unlikely(_expr) unlikely(_expr)
#define __qdf_likely(_expr) likely(_expr)

#define __qdf_bitmap(name, bits) DECLARE_BITMAP(name, bits)

/**
 * __qdf_set_bit() - set bit in address
 * @nr: bit number to be set
 * @addr: address buffer pointer
 *
 * Return: none
 */
static inline void __qdf_set_bit(unsigned int nr, unsigned long *addr)
{
	__set_bit(nr, addr);
}

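/**
 * __qdf_clear_bit() - clear bit in address
 * @nr: bit number to be cleared
 * @addr: address buffer pointer
 *
 * Return: none
 */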
static inline void __qdf_clear_bit(unsigned int nr, unsigned long *addr)
{
	__clear_bit(nr, addr);
}

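/**
 * __qdf_test_bit() - test bit position in address
 * @nr: bit number to be tested
 * @addr: address buffer pointer
 *
 * Return: true if the bit is set, false otherwise
 */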
static inline bool __qdf_test_bit(unsigned int nr, unsigned long *addr)
{
	return test_bit(nr, addr);
}

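/**
 * __qdf_test_and_clear_bit() - test and clear bit position in address
 * @nr: bit number to be tested and cleared
 * @addr: address buffer pointer
 *
 * Return: true if the bit was set before it was cleared, false otherwise
 */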
static inline bool __qdf_test_and_clear_bit(unsigned int nr,
					    unsigned long *addr)
{
	return __test_and_clear_bit(nr, addr);
}

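/**
 * __qdf_find_first_bit() - find the first set bit in a bitmap
 * @addr: address buffer pointer
 * @nbits: number of valid bits in the bitmap
 *
 * Return: position of the first set bit, or @nbits if no bit is set
 */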
static inline unsigned long __qdf_find_first_bit(unsigned long *addr,
						 unsigned long nbits)
{
	return find_first_bit(addr, nbits);
}

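/**
 * __qdf_bitmap_empty() - check whether a bitmap is empty
 * @addr: address buffer pointer
 * @nbits: number of valid bits in the bitmap
 *
 * Return: true if no bit is set in the bitmap, false otherwise
 */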
static inline bool __qdf_bitmap_empty(unsigned long *addr,
				      unsigned long nbits)
{
	return bitmap_empty(addr, nbits);
}

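/**
 * __qdf_bitmap_and() - bitwise AND of two bitmaps
 * @dst: destination bitmap
 * @src1: first source bitmap
 * @src2: second source bitmap
 * @nbits: number of valid bits in the bitmaps
 *
 * Return: non-zero if the resulting bitmap has any bit set, 0 otherwise
 */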
static inline int __qdf_bitmap_and(unsigned long *dst, unsigned long *src1,
				   unsigned long *src2, unsigned long nbits)
{
	return bitmap_and(dst, src1, src2, nbits);
}

/**
 * __qdf_set_macaddr_broadcast() - set a QDF MacAddress to the 'broadcast' address
 * @mac_addr: pointer to the qdf MacAddress to set to broadcast
 *
 * This function sets a QDF MacAddress to the 'broadcast' MacAddress. The
 * broadcast MacAddress contains all 0xFF bytes.
 *
 * Return: none
 */
static inline void __qdf_set_macaddr_broadcast(struct qdf_mac_addr *mac_addr)
{
	memset(mac_addr, 0xff, QDF_MAC_ADDR_SIZE);
}

/**
 * __qdf_zero_macaddr() - zero out a MacAddress
 * @mac_addr: pointer to the struct qdf_mac_addr to zero
 *
 * This function zeros out a QDF MacAddress type.
 *
 * Return: none
 */
static inline void __qdf_zero_macaddr(struct qdf_mac_addr *mac_addr)
{
	memset(mac_addr, 0, QDF_MAC_ADDR_SIZE);
}

/**
 * __qdf_is_macaddr_equal() - compare two QDF MacAddresses
 * @mac_addr1: Pointer to one qdf MacAddress to compare
 * @mac_addr2: Pointer to the other qdf MacAddress to compare
 *
 * This function returns a bool that tells whether two QDF MacAddresses
 * are equivalent.
 *
 * Return: true if the MacAddresses are equal,
 * false if the MacAddresses are not equal
 */
static inline bool __qdf_is_macaddr_equal(const struct qdf_mac_addr *mac_addr1,
					  const struct qdf_mac_addr *mac_addr2)
{
	return 0 == memcmp(mac_addr1, mac_addr2, QDF_MAC_ADDR_SIZE);
}

#define __qdf_in_interrupt in_interrupt

#define __qdf_min(_a, _b) min(_a, _b)
#define __qdf_max(_a, _b) max(_a, _b)

/*
 * Defined as blank because this feature is not intended to be supported
 * on Linux kernel versions below 4.3
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
#define __QDF_DECLARE_EWMA(name, _factor, _weight)

#define __qdf_ewma_tx_lag int
#define __qdf_ewma_rx_rssi int
#else
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
#define __QDF_DECLARE_EWMA(name, _factor, _weight) \
	DECLARE_EWMA(name, ilog2(_factor), _weight)
#else
#define __QDF_DECLARE_EWMA(name, _factor, _weight) \
	DECLARE_EWMA(name, _factor, _weight)
#endif

#define __qdf_ewma_tx_lag struct ewma_tx_lag
#define __qdf_ewma_rx_rssi struct ewma_rx_rssi
#endif

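/* First zero bit position in mask (0-based); -1 when all bits are set */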
#define __qdf_ffz(mask) (~(mask) == 0 ? -1 : ffz(mask))

#define MEMINFO_KB(x) ((x) << (PAGE_SHIFT - 10)) /* In kilobytes */

#define __qdf_assert(expr) do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s:%s %s:%d\n", \
			       # expr, __func__, __FILE__, __LINE__); \
			dump_stack(); \
			QDF_BUG_ON_ASSERT(0); \
		} \
	} while (0)

#define __qdf_assert_with_debug(expr, debug_fp, ...) \
	do { \
		typeof(debug_fp) _debug_fp = debug_fp; \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s:%s %s:%d\n", \
			       # expr, __func__, __FILE__, __LINE__); \
			if (_debug_fp) \
				_debug_fp(__VA_ARGS__); \
			QDF_BUG_ON_ASSERT(0); \
		} \
	} while (0)

#define __qdf_target_assert(expr) do { \
		if (unlikely(!(expr))) { \
			qdf_err("Assertion failed! %s:%s %s:%d", \
				#expr, __FUNCTION__, __FILE__, __LINE__); \
			dump_stack(); \
			QDF_DEBUG_PANIC("Take care of the TARGET ASSERT first\n"); \
		} \
	} while (0)

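/*
 * Compile-time assertion: when the predicate is false, the typedef below
 * declares an array of negative size, which forces a compilation error.
 */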
#define QDF_COMPILE_TIME_ASSERT(assertion_name, predicate) \
	typedef char assertion_name[(predicate) ? 1 : -1]

#define __qdf_container_of(ptr, type, member) container_of(ptr, type, member)

#define __qdf_ntohs ntohs
#define __qdf_ntohl ntohl

#define __qdf_htons htons
#define __qdf_htonl htonl

#define __qdf_cpu_to_le16 cpu_to_le16
#define __qdf_cpu_to_le32 cpu_to_le32
#define __qdf_cpu_to_le64 cpu_to_le64

#define __qdf_le16_to_cpu le16_to_cpu
#define __qdf_le32_to_cpu le32_to_cpu
#define __qdf_le64_to_cpu le64_to_cpu

#define __qdf_cpu_to_be16 cpu_to_be16
#define __qdf_cpu_to_be32 cpu_to_be32
#define __qdf_cpu_to_be64 cpu_to_be64

#define __qdf_be16_to_cpu be16_to_cpu
#define __qdf_be32_to_cpu be32_to_cpu
#define __qdf_be64_to_cpu be64_to_cpu

#define __qdf_wmb() wmb()
#define __qdf_rmb() rmb()
#define __qdf_mb() mb()
#define __qdf_ioread32(offset) ioread32(offset)
#define __qdf_iowrite32(offset, value) iowrite32(value, offset)

#define __qdf_roundup(x, y) roundup(x, y)
#define __qdf_ceil(x, y) DIV_ROUND_UP(x, y)
#define __qdf_abs(x) abs(x)

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
#define __qdf_ewma_tx_lag_init(tx_lag)
#define __qdf_ewma_tx_lag_add(tx_lag, value)
#define __qdf_ewma_tx_lag_read(tx_lag)

#define __qdf_ewma_rx_rssi_init(rx_rssi)
#define __qdf_ewma_rx_rssi_add(rx_rssi, value)
#define __qdf_ewma_rx_rssi_read(rx_rssi)
#else
#define __qdf_ewma_tx_lag_init(tx_lag) \
	ewma_tx_lag_init(tx_lag)

#define __qdf_ewma_tx_lag_add(tx_lag, value) \
	ewma_tx_lag_add(tx_lag, value)

#define __qdf_ewma_tx_lag_read(tx_lag) \
	ewma_tx_lag_read(tx_lag)

#define __qdf_ewma_rx_rssi_init(rx_rssi) \
	ewma_rx_rssi_init(rx_rssi)

#define __qdf_ewma_rx_rssi_add(rx_rssi, value) \
	ewma_rx_rssi_add(rx_rssi, value)

#define __qdf_ewma_rx_rssi_read(rx_rssi) \
	ewma_rx_rssi_read(rx_rssi)
#endif

#define __qdf_prefetch(x) prefetch(x)

#ifdef QCA_CONFIG_SMP
/**
 * __qdf_get_cpu() - get cpu_index
 *
 * Return: cpu_index
 */
static inline
int __qdf_get_cpu(void)
{
	int cpu_index = get_cpu();

	put_cpu();
	return cpu_index;
}
#else
static inline
int __qdf_get_cpu(void)
{
	return 0;
}
#endif

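/**
 * __qdf_device_init_wakeup() - enable or disable device wakeup capability
 * @qdf_dev: qdf device context
 * @enable: true to enable device wakeup, false to disable it
 *
 * Return: 0 on success, negative error code on failure
 */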
static inline int __qdf_device_init_wakeup(__qdf_device_t qdf_dev, bool enable)
{
	return device_init_wakeup(qdf_dev->dev, enable);
}

/**
 * __qdf_get_totalramsize() - get total RAM size in kB
 *
 * Return: total RAM size in kB
 */
static inline uint64_t
__qdf_get_totalramsize(void)
{
	struct sysinfo meminfo;

	si_meminfo(&meminfo);
	return MEMINFO_KB(meminfo.totalram);
}

/**
 * __qdf_get_lower_32_bits() - get lower 32 bits from an address
 * @addr: address
 *
 * This API returns the lower 32 bits of an address.
 *
 * Return: lower 32 bits
 */
static inline
uint32_t __qdf_get_lower_32_bits(__qdf_dma_addr_t addr)
{
	return lower_32_bits(addr);
}

/**
 * __qdf_get_upper_32_bits() - get upper 32 bits from an address
 * @addr: address
 *
 * This API returns the upper 32 bits of an address.
 *
 * Return: upper 32 bits
 */
static inline
uint32_t __qdf_get_upper_32_bits(__qdf_dma_addr_t addr)
{
	return upper_32_bits(addr);
}

/**
 * __qdf_rounddown_pow_of_two() - round down to the nearest power of two
 * @n: number to be rounded
 *
 * Check whether the input number is already a power of two; if not, round it
 * down to the nearest power of two.
 *
 * Return: number rounded down to the nearest power of two
 */
static inline
unsigned long __qdf_rounddown_pow_of_two(unsigned long n)
{
	if (is_power_of_2(n))
		return n; /* already a power of 2 */

	return __rounddown_pow_of_two(n);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)

/**
 * __qdf_set_dma_coherent_mask() - set max number of bits allowed in dma addr
 * @dev: device pointer
 * @addr_bits: max number of bits allowed in dma address
 *
 * This API sets the maximum allowed number of bits in the dma address.
 *
 * Return: 0 - success, non zero - failure
 */
static inline
int __qdf_set_dma_coherent_mask(struct device *dev, uint8_t addr_bits)
{
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(addr_bits));
}

#else

/**
 * __qdf_set_dma_coherent_mask() - set max number of bits allowed in dma addr
 * @dev: device pointer
 * @addr_bits: max number of bits allowed in dma address
 *
 * This API sets the maximum allowed number of bits in the dma address.
 *
 * Return: 0 - success, non zero - failure
 */
static inline
int __qdf_set_dma_coherent_mask(struct device *dev, uint8_t addr_bits)
{
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(addr_bits));
}
#endif

/**
 * __qdf_get_random_bytes() - fill a buffer with random bytes
 * @buf: buffer to fill
 * @nbytes: number of bytes to fill
 *
 * Return: void
 */
static inline
void __qdf_get_random_bytes(void *buf, int nbytes)
{
	return get_random_bytes(buf, nbytes);
}

/**
 * __qdf_do_div() - wrapper function for the kernel macro do_div()
 * @dividend: dividend value
 * @divisor: divisor value
 *
 * Return: quotient
 */
static inline
uint64_t __qdf_do_div(uint64_t dividend, uint32_t divisor)
{
	do_div(dividend, divisor);
	/* the do_div macro updates dividend with the quotient of dividend/divisor */
	return dividend;
}

/**
 * __qdf_do_div_rem() - wrapper function for the kernel macro do_div()
 * to get the remainder
 * @dividend: dividend value
 * @divisor: divisor value
 *
 * Return: remainder
 */
static inline
uint64_t __qdf_do_div_rem(uint64_t dividend, uint32_t divisor)
{
	return do_div(dividend, divisor);
}

/**
 * __qdf_hex_to_bin() - wrapper for the kernel API to get an unsigned
 * integer from a hexadecimal ASCII character
 * @ch: hexadecimal ASCII character
 *
 * Return: the decimal value of the hexadecimal ASCII character,
 * or -1 for bad input.
 */
static inline
int __qdf_hex_to_bin(char ch)
{
	return hex_to_bin(ch);
}

/**
 * __qdf_hex_str_to_binary() - wrapper to convert a string of hexadecimal
 * ASCII characters into an array of bytes
 * @dst: output array to hold the converted values
 * @src: input string of hexadecimal ASCII characters
 * @count: number of bytes to write to @dst
 *
 * Return: 0 for a valid string of hexadecimal ASCII characters,
 * -1 for bad input.
 */
static inline
int __qdf_hex_str_to_binary(u8 *dst, const char *src, size_t count)
{
	return hex2bin(dst, src, count);
}

/**
 * __qdf_fls() - find last set bit in a given 32 bit input
 * @x: 32 bit mask
 *
 * Return: zero if the input is zero, otherwise returns the bit
 * position of the last set bit, where the LSB is 1 and MSB is 32.
 */
static inline
int __qdf_fls(uint32_t x)
{
	return fls(x);
}

/**
 * __qdf_ffs() - find first set bit in a given 32 bit input
 * @x: 32 bit mask
 *
 * Return: zero if the input is zero, otherwise returns the bit
 * position of the first set bit, where the LSB is 1 and MSB is 32.
 */
static inline
int __qdf_ffs(uint32_t x)
{
	return ffs(x);
}

/**
 * __qdf_get_smp_processor_id() - Get the current CPU id
 *
 * Return: current CPU id
 */
static inline int __qdf_get_smp_processor_id(void)
{
	return smp_processor_id();
}

/**
 * __qdf_in_atomic() - check whether the current thread is running in
 * atomic context
 *
 * Return: true if the current thread is running in atomic context,
 * false otherwise.
 */
static inline bool __qdf_in_atomic(void)
{
	if (in_interrupt() || !preemptible() || rcu_preempt_depth())
		return true;

	return false;
}

#endif /*_I_QDF_UTIL_H*/