/*
 * Copyright (c) 2014-2018, 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: i_qdf_lock.h
 * Linux-specific definitions for the QDF lock APIs
 */

#if !defined(__I_QDF_LOCK_H)
#define __I_QDF_LOCK_H

/* Include Files */
#include <qdf_types.h>
#include <qdf_status.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
#include <asm/semaphore.h>
#else
#include <linux/semaphore.h>
#endif
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

/* define for flag */
#define QDF_LINUX_UNLOCK_BH 1

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

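/*
 * Lock lifecycle states recorded in qdf_lock_s::state. The distinctive
 * 0x11223344 base value presumably makes an uninitialized or corrupted
 * lock easier to spot while debugging.
 */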
enum {
	LOCK_RELEASED = 0x11223344,
	LOCK_ACQUIRED,
	LOCK_DESTROYED
};

/**
 * struct qdf_lock_s - mutex abstraction
 * @m_lock: Mutex lock
 * @cookie: Lock cookie
 * @process_id: Process ID to track lock
 * @state: Lock status
 * @refcount: Reference count for recursive lock
 * @stats: a structure that contains usage statistics
 */
struct qdf_lock_s {
	struct mutex m_lock;
	uint32_t cookie;
	int process_id;
	uint32_t state;
	uint8_t refcount;
	struct lock_stats stats;
};

/**
 * typedef __qdf_mutex_t - Mutex abstraction
 */
typedef struct qdf_lock_s __qdf_mutex_t;

/**
 * typedef __qdf_spinlock_t - spinlock abstraction
 * @spinlock: Spin lock
 * @flags: Lock flag
 */
typedef struct __qdf_spinlock {
	spinlock_t spinlock;
	unsigned long flags;
} __qdf_spinlock_t;

/**
 * typedef __qdf_semaphore_t - semaphore abstraction
 */
typedef struct semaphore __qdf_semaphore_t;

/**
 * typedef qdf_wake_lock_t - wakelock abstraction
 * @lock: embedded wakeup source; used on kernel versions < 5.4
 * @priv: pointer to a wakeup source; used on kernel versions >= 5.4
 */
typedef struct qdf_wake_lock {
	struct wakeup_source lock;
	struct wakeup_source *priv;
} qdf_wake_lock_t;

struct hif_pm_runtime_lock;
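/**
 * typedef qdf_runtime_lock_t - runtime PM lock abstraction
 * @lock: opaque HIF runtime PM lock handle
 */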
typedef struct qdf_runtime_lock {
	struct hif_pm_runtime_lock *lock;
} qdf_runtime_lock_t;

#define LINUX_LOCK_COOKIE 0x12345678

/* Function declarations and documentation */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
/**
 * __qdf_semaphore_init() - initialize the semaphore
 * @m: Semaphore object
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
{
	init_MUTEX(m);
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_semaphore_init(struct semaphore *m)
{
	sema_init(m, 1);
	return QDF_STATUS_SUCCESS;
}
#endif

/**
 * __qdf_semaphore_acquire() - acquire semaphore
 * @m: Semaphore object
 *
 * Return: 0
 */
static inline int __qdf_semaphore_acquire(struct semaphore *m)
{
	down(m);
	return 0;
}

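/*
 * Example (illustrative sketch only, not part of this API): basic
 * binary-semaphore usage with the wrappers above; my_sem and
 * my_critical_work() are hypothetical.
 *
 *	static struct semaphore my_sem;
 *
 *	__qdf_semaphore_init(&my_sem);
 *	...
 *	__qdf_semaphore_acquire(&my_sem);
 *	my_critical_work();
 *	__qdf_semaphore_release(&my_sem);
 */
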
/**
 * __qdf_semaphore_acquire_intr() - Take the semaphore, interruptible
 * @m: Semaphore object
 *
 * This function allows a user-space process that is waiting on a
 * semaphore to be interrupted by the user. If the operation is
 * interrupted, the function returns a nonzero value, and the caller
 * does not hold the semaphore. Always check the return value and
 * respond accordingly.
 *
 * Return: 0 if the semaphore was acquired, non-zero if not acquired
 */
static inline int __qdf_semaphore_acquire_intr(struct semaphore *m)
{
	return down_interruptible(m);
}

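/*
 * Example (illustrative sketch only, my_sem is hypothetical): the
 * interruptible variant must have its return value checked, since a
 * signal can abort the wait before the semaphore is held.
 *
 *	if (__qdf_semaphore_acquire_intr(&my_sem))
 *		return -EINTR;
 *	...critical section...
 *	__qdf_semaphore_release(&my_sem);
 */
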
/**
 * __qdf_semaphore_release() - release semaphore
 * @m: Semaphore object
 *
 * Return: none
 */
static inline void __qdf_semaphore_release(struct semaphore *m)
{
	up(m);
}

/**
 * __qdf_semaphore_acquire_timeout() - Take the semaphore before timeout
 * @m: semaphore to take
 * @timeout: maximum time to try to take the semaphore, in milliseconds
 *
 * Return: 0 if the semaphore was acquired, non-zero on timeout
 */
static inline int __qdf_semaphore_acquire_timeout(struct semaphore *m,
						  unsigned long timeout)
{
	unsigned long jiffie_val = msecs_to_jiffies(timeout);

	return down_timeout(m, jiffie_val);
}

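/*
 * Example (illustrative sketch only, my_sem is hypothetical): bounded
 * wait. The timeout is given in milliseconds and converted to jiffies
 * internally; a nonzero return (-ETIME from down_timeout()) means the
 * semaphore was not acquired.
 *
 *	if (__qdf_semaphore_acquire_timeout(&my_sem, 100))
 *		return QDF_STATUS_E_TIMEOUT;
 */
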
/**
 * __qdf_spinlock_create() - initialize spin lock
 * @lock: Spin lock object
 *
 * Return: QDF_STATUS_SUCCESS
 */
static inline QDF_STATUS __qdf_spinlock_create(__qdf_spinlock_t *lock)
{
	spin_lock_init(&lock->spinlock);
	lock->flags = 0;
	return QDF_STATUS_SUCCESS;
}

#define __qdf_spinlock_destroy(lock)

/**
 * __qdf_spin_lock() - Acquire a spinlock (SMP) and disable preemption
 * (preemptible kernels)
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_lock(__qdf_spinlock_t *lock)
{
	spin_lock(&lock->spinlock);
}

/**
 * __qdf_spin_unlock() - Release the spinlock and re-enable preemption
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_unlock(__qdf_spinlock_t *lock)
{
	spin_unlock(&lock->spinlock);
}

/**
 * __qdf_spin_lock_irqsave() - Acquire a spinlock (SMP), disable preemption
 * (preemptible kernels) and disable IRQs, saving the previous IRQ state
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_lock_irqsave(__qdf_spinlock_t *lock)
{
	spin_lock_irqsave(&lock->spinlock, lock->flags);
}

/**
 * __qdf_spin_unlock_irqrestore() - Release the spinlock, re-enable
 * preemption and restore the saved IRQ state
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_unlock_irqrestore(__qdf_spinlock_t *lock)
{
	spin_unlock_irqrestore(&lock->spinlock, lock->flags);
}

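/*
 * Because __qdf_spin_lock_irqsave() saves the IRQ state in the lock
 * object itself, callers do not keep a local flags variable; the
 * matching __qdf_spin_unlock_irqrestore() restores the state saved at
 * acquisition. A minimal sketch (my_lock is hypothetical):
 *
 *	__qdf_spin_lock_irqsave(&my_lock);
 *	...critical section, safe against local interrupts...
 *	__qdf_spin_unlock_irqrestore(&my_lock);
 */
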
/*
 * Synchronous versions - only for operating systems that support
 * disabling interrupts
 */
#define __qdf_spin_lock_irq(_p_lock, _flags) spin_lock_irqsave(_p_lock, _flags)
#define __qdf_spin_unlock_irq(_p_lock, _flags) \
	spin_unlock_irqrestore(_p_lock, _flags)

/**
 * __qdf_spin_is_locked() - Test if spinlock is locked
 * @lock: spinlock object
 *
 * Return: nonzero if lock is held.
 */
static inline int __qdf_spin_is_locked(__qdf_spinlock_t *lock)
{
	return spin_is_locked(&lock->spinlock);
}

/**
 * __qdf_spin_trylock_bh() - spin trylock bottomhalf
 * @lock: spinlock object
 *
 * Return: nonzero if lock is acquired
 */
static inline int __qdf_spin_trylock_bh(__qdf_spinlock_t *lock)
{
	if (likely(irqs_disabled() || in_irq() || in_softirq()))
		return spin_trylock(&lock->spinlock);

	if (spin_trylock_bh(&lock->spinlock)) {
		lock->flags |= QDF_LINUX_UNLOCK_BH;
		return 1;
	}

	return 0;
}

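/*
 * Example (illustrative sketch only, my_lock is hypothetical):
 * opportunistic locking that gives up rather than spinning when the
 * lock is contended.
 *
 *	if (!__qdf_spin_trylock_bh(&my_lock))
 *		return false;
 *	...critical section...
 *	__qdf_spin_unlock_bh(&my_lock);
 *
 * A successful trylock taken from process context must be released with
 * __qdf_spin_unlock_bh() so that the QDF_LINUX_UNLOCK_BH flag recorded
 * above is consumed and bottom halves are re-enabled.
 */
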
/**
 * __qdf_spin_trylock() - spin trylock
 * @lock: spinlock object
 *
 * Return: nonzero if the lock is acquired, else 0
 */
static inline int __qdf_spin_trylock(__qdf_spinlock_t *lock)
{
	return spin_trylock(&lock->spinlock);
}

/**
 * __qdf_spin_lock_bh() - Acquire the spinlock and disable bottom halves
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_lock_bh(__qdf_spinlock_t *lock)
{
	if (likely(irqs_disabled() || in_irq() || in_softirq())) {
		spin_lock(&lock->spinlock);
	} else {
		spin_lock_bh(&lock->spinlock);
		lock->flags |= QDF_LINUX_UNLOCK_BH;
	}
}

/**
 * __qdf_spin_unlock_bh() - Release the spinlock and enable bottom halves
 * @lock: Lock object
 *
 * Return: none
 */
static inline void __qdf_spin_unlock_bh(__qdf_spinlock_t *lock)
{
	if (unlikely(lock->flags & QDF_LINUX_UNLOCK_BH)) {
		lock->flags &= (unsigned long)~QDF_LINUX_UNLOCK_BH;
		spin_unlock_bh(&lock->spinlock);
	} else {
		spin_unlock(&lock->spinlock);
	}
}

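/*
 * Example (illustrative sketch only, my_lock and my_count are
 * hypothetical): protecting state shared between process context and a
 * bottom half such as a tasklet. From process context the pair below
 * disables bottom halves and records that in QDF_LINUX_UNLOCK_BH; when
 * already in IRQ/softirq context it degrades to a plain
 * spin_lock()/spin_unlock().
 *
 *	__qdf_spin_lock_bh(&my_lock);
 *	my_count++;
 *	__qdf_spin_unlock_bh(&my_lock);
 */
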
/**
 * __qdf_spinlock_irq_exec() - Execute the input function with
 * spinlock held and interrupts disabled.
 * @hdl: OS handle
 * @lock: spinlock to be held for the critical region
 * @func: critical region function to be executed
 * @arg: context of the critical region function
 *
 * Return: Boolean status returned by the critical region function
 */
static inline bool __qdf_spinlock_irq_exec(qdf_handle_t hdl,
					   __qdf_spinlock_t *lock,
					   qdf_irqlocked_func_t func,
					   void *arg)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&lock->spinlock, flags);
	ret = func(arg);
	spin_unlock_irqrestore(&lock->spinlock, flags);

	return ret;
}

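/*
 * Example (illustrative sketch only): running a short critical section
 * with the lock held and IRQs disabled. my_check() and my_ctx are
 * hypothetical, and my_check() is assumed to match qdf_irqlocked_func_t
 * (taking a void * and returning bool, consistent with how func is
 * invoked above).
 *
 *	static bool my_check(void *arg)
 *	{
 *		struct my_ctx *ctx = arg;
 *
 *		return ctx->ready;
 *	}
 *
 *	ready = __qdf_spinlock_irq_exec(hdl, &my_lock, my_check, &ctx);
 */
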
/**
 * __qdf_in_softirq() - in soft irq context
 *
 * Return: true if in softirq context, else false
 */
static inline bool __qdf_in_softirq(void)
{
	return in_softirq();
}

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* __I_QDF_LOCK_H */