/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <qdf_lock.h>
#include <qdf_trace.h>
#include <qdf_module.h>

#include <qdf_types.h>
#include <i_host_diag_core_event.h>
#ifdef FEATURE_RUNTIME_PM
#include <cds_api.h>
#include <hif.h>
#endif
#include <i_qdf_lock.h>
#include <linux/suspend.h>

#undef qdf_mutex_create
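/*
 * qdf_mutex_create() - initialize a QDF mutex.
 *
 * The header typically wraps this in a macro that supplies the caller's
 * __func__ and __LINE__ for lock statistics, hence the #undef above.
 * Must not be called from interrupt context. Illustrative usage sketch
 * (the single-argument form assumes that wrapper macro exists):
 *
 *	static qdf_mutex_t my_lock;
 *
 *	if (QDF_IS_STATUS_ERROR(qdf_mutex_create(&my_lock)))
 *		return QDF_STATUS_E_FAILURE;
 */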
QDF_STATUS qdf_mutex_create(qdf_mutex_t *lock, const char *func, int line)
{
	/* check for invalid pointer */
	if (!lock) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed in", __func__);
		return QDF_STATUS_E_FAULT;
	}
	/* check for 'already initialized' lock */
	if (LINUX_LOCK_COOKIE == lock->cookie) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: already initialized lock", __func__);
		return QDF_STATUS_E_BUSY;
	}

	if (in_interrupt()) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return QDF_STATUS_E_FAULT;
	}

	qdf_lock_stats_create(&lock->stats, func, line);

	/* initialize new lock */
	mutex_init(&lock->m_lock);
	lock->cookie = LINUX_LOCK_COOKIE;
	lock->state = LOCK_RELEASED;
	lock->process_id = 0;
	lock->refcount = 0;

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_mutex_create);

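/*
 * qdf_mutex_acquire() - acquire a QDF mutex.
 *
 * Recursive acquisition by the thread that already holds the lock only
 * bumps the refcount; a matching number of qdf_mutex_release() calls is
 * then required before the underlying kernel mutex is actually dropped.
 * Must not be called from interrupt context.
 */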
QDF_STATUS qdf_mutex_acquire(qdf_mutex_t *lock)
{
	int rc;
	/* check for invalid pointer */
	if (!lock) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed in", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAULT;
	}
	/* check if lock refers to an initialized object */
	if (LINUX_LOCK_COOKIE != lock->cookie) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: uninitialized lock", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	if (in_interrupt()) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAULT;
	}
	if ((lock->process_id == current->pid) &&
	    (lock->state == LOCK_ACQUIRED)) {
		lock->refcount++;
#ifdef QDF_NESTED_LOCK_DEBUG
		pe_err("%s: %x %d %d", __func__, lock, current->pid,
		       lock->refcount);
#endif
		return QDF_STATUS_SUCCESS;
	}

	BEFORE_LOCK(lock, mutex_is_locked(&lock->m_lock));
	/* acquire a Lock */
	mutex_lock(&lock->m_lock);
	AFTER_LOCK(lock, __func__);
	rc = mutex_is_locked(&lock->m_lock);
	if (rc == 0) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: unable to lock mutex (rc = %d)", __func__, rc);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAILURE;
	}
#ifdef QDF_NESTED_LOCK_DEBUG
	pe_err("%s: %x %d", __func__, lock, current->pid);
#endif
	if (LOCK_DESTROYED != lock->state) {
		lock->process_id = current->pid;
		lock->refcount++;
		lock->state = LOCK_ACQUIRED;
		return QDF_STATUS_SUCCESS;
	}

	/* lock is already destroyed */
	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
		  "%s: Lock is already destroyed", __func__);
	mutex_unlock(&lock->m_lock);
	QDF_ASSERT(0);
	return QDF_STATUS_E_FAILURE;
}
qdf_export_symbol(qdf_mutex_acquire);

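/*
 * qdf_mutex_release() - release a QDF mutex.
 *
 * Only the thread (pid) that acquired the lock may release it. For a
 * recursively acquired lock the refcount is decremented, and the kernel
 * mutex is unlocked only when the count drops to zero.
 * Must not be called from interrupt context.
 */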
QDF_STATUS qdf_mutex_release(qdf_mutex_t *lock)
{
	/* check for invalid pointer */
	if (!lock) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed in", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAULT;
	}

	/* check if lock refers to an uninitialized object */
	if (LINUX_LOCK_COOKIE != lock->cookie) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: uninitialized lock", __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_INVAL;
	}

	if (in_interrupt()) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		QDF_ASSERT(0);
		return QDF_STATUS_E_FAULT;
	}

	/* Check the thread ID of the caller against the thread ID
	 * of the thread which acquired the lock.
	 */
	if (lock->process_id != current->pid) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: current task pid does not match original task pid!!",
			  __func__);
#ifdef QDF_NESTED_LOCK_DEBUG
		pe_err("%s: Lock held by=%d being released by=%d",
		       __func__, lock->process_id, current->pid);
#endif
		QDF_ASSERT(0);
		return QDF_STATUS_E_PERM;
	}
	if ((lock->process_id == current->pid) &&
	    (lock->state == LOCK_ACQUIRED)) {
		if (lock->refcount > 0)
			lock->refcount--;
	}
#ifdef QDF_NESTED_LOCK_DEBUG
	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
		  "%s: %x %d %d", __func__, lock, lock->process_id,
		  lock->refcount);
#endif
	if (lock->refcount)
		return QDF_STATUS_SUCCESS;

	lock->process_id = 0;
	lock->refcount = 0;
	lock->state = LOCK_RELEASED;
	/* release a Lock */
	BEFORE_UNLOCK(lock, 0);
	mutex_unlock(&lock->m_lock);
#ifdef QDF_NESTED_LOCK_DEBUG
	QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
		  "%s: Freeing lock %x %d %d", __func__, lock,
		  lock->process_id, lock->refcount);
#endif
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_mutex_release);

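/*
 * When WLAN_WAKE_LOCK_DEBUG is enabled, every wake lock create/destroy
 * pair is recorded in a small tracker so that leaked (never destroyed)
 * wake locks can be reported via qdf_wake_lock_check_for_leaks().
 */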
#ifdef WLAN_WAKE_LOCK_DEBUG
#include "qdf_tracker.h"

#define qdf_wake_lock_tracker_bits 2 /* 4 buckets */
static qdf_tracker_declare(qdf_wake_lock_tracker, qdf_wake_lock_tracker_bits,
			   "wake lock leaks", "wake lock create",
			   "wake lock destroy");

void qdf_wake_lock_feature_init(void)
{
	qdf_tracker_init(&qdf_wake_lock_tracker);
}

void qdf_wake_lock_feature_deinit(void)
{
	qdf_tracker_deinit(&qdf_wake_lock_tracker);
}

void qdf_wake_lock_check_for_leaks(void)
{
	qdf_tracker_check_for_leaks(&qdf_wake_lock_tracker);
}

static inline QDF_STATUS qdf_wake_lock_dbg_track(qdf_wake_lock_t *lock,
						 const char *func,
						 uint32_t line)
{
	return qdf_tracker_track(&qdf_wake_lock_tracker, lock, func, line);
}

static inline void qdf_wake_lock_dbg_untrack(qdf_wake_lock_t *lock,
					     const char *func, uint32_t line)
{
	qdf_tracker_untrack(&qdf_wake_lock_tracker, lock, func, line);
}
#else
static inline QDF_STATUS qdf_wake_lock_dbg_track(qdf_wake_lock_t *lock,
						 const char *func,
						 uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}

static inline void qdf_wake_lock_dbg_untrack(qdf_wake_lock_t *lock,
					     const char *func, uint32_t line)
{ }
#endif /* WLAN_WAKE_LOCK_DEBUG */

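/*
 * qdf_wake_lock_name() - return the name of a wake lock, or a placeholder
 * string on kernels without wakeup source support.
 */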
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
const char *qdf_wake_lock_name(qdf_wake_lock_t *lock)
{
	if (lock)
		return lock->lock.name;
	return "UNNAMED_WAKELOCK";
}
#else
const char *qdf_wake_lock_name(qdf_wake_lock_t *lock)
{
	return "NO_WAKELOCK_SUPPORT";
}
#endif
qdf_export_symbol(qdf_wake_lock_name);

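/*
 * __qdf_wake_lock_create() - create/register a wakeup source. Newer kernels
 * (>= 4.19.110, or when WAKEUP_SOURCE_DEV is defined) use
 * wakeup_source_register(); older 3.10+ kernels initialize the embedded
 * wakeup_source with wakeup_source_init(); anything older is a no-op.
 */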
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 110)) || \
	defined(WAKEUP_SOURCE_DEV)
QDF_STATUS __qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name,
				  const char *func, uint32_t line)
{
	QDF_STATUS status;

	status = qdf_wake_lock_dbg_track(lock, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	qdf_mem_zero(lock, sizeof(*lock));
	lock->priv = wakeup_source_register(lock->lock.dev, name);
	if (!(lock->priv)) {
		QDF_BUG(0);
		return QDF_STATUS_E_FAILURE;
	}

	lock->lock = *(lock->priv);

	return QDF_STATUS_SUCCESS;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
QDF_STATUS __qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name,
				  const char *func, uint32_t line)
{
	QDF_STATUS status;

	status = qdf_wake_lock_dbg_track(lock, func, line);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	wakeup_source_init(&(lock->lock), name);
	lock->priv = &(lock->lock);

	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS __qdf_wake_lock_create(qdf_wake_lock_t *lock, const char *name,
				  const char *func, uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}
#endif
qdf_export_symbol(__qdf_wake_lock_create);

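/*
 * qdf_wake_lock_acquire() - hold the wakeup source so the system stays
 * awake; the acquire is also logged as a host diag power event with the
 * supplied reason code.
 */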
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason)
{
	host_diag_log_wlock(reason, qdf_wake_lock_name(lock),
			    WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
			    WIFI_POWER_EVENT_WAKELOCK_TAKEN);
	__pm_stay_awake(lock->priv);

	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS qdf_wake_lock_acquire(qdf_wake_lock_t *lock, uint32_t reason)
{
	return QDF_STATUS_SUCCESS;
}
#endif
qdf_export_symbol(qdf_wake_lock_acquire);

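/*
 * qdf_wake_lock_timeout_acquire() - hold the wakeup source for at most
 * @msec milliseconds; uses pm_wakeup_ws_event() on 4.12+ kernels and
 * __pm_wakeup_event() on older 3.10+ kernels.
 */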
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec)
{
	pm_wakeup_ws_event(lock->priv, msec, true);
	return QDF_STATUS_SUCCESS;
}
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec)
{
	/* Wakelock for Rx is frequent.
	 * It is reported only during active debug
	 */
	__pm_wakeup_event(&(lock->lock), msec);
	return QDF_STATUS_SUCCESS;
}
#else /* LINUX_VERSION_CODE */
QDF_STATUS qdf_wake_lock_timeout_acquire(qdf_wake_lock_t *lock, uint32_t msec)
{
	return QDF_STATUS_SUCCESS;
}
#endif /* LINUX_VERSION_CODE */
qdf_export_symbol(qdf_wake_lock_timeout_acquire);

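/*
 * qdf_wake_lock_release() - drop the wakeup source and log the release as
 * a host diag power event with the supplied reason code.
 */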
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason)
{
	host_diag_log_wlock(reason, qdf_wake_lock_name(lock),
			    WIFI_POWER_EVENT_DEFAULT_WAKELOCK_TIMEOUT,
			    WIFI_POWER_EVENT_WAKELOCK_RELEASED);
	__pm_relax(lock->priv);

	return QDF_STATUS_SUCCESS;
}
#else
QDF_STATUS qdf_wake_lock_release(qdf_wake_lock_t *lock, uint32_t reason)
{
	return QDF_STATUS_SUCCESS;
}
#endif
qdf_export_symbol(qdf_wake_lock_release);

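/*
 * __qdf_wake_lock_destroy() - tear down the wakeup source created by
 * __qdf_wake_lock_create() (wakeup_source_unregister() on newer kernels,
 * wakeup_source_trash() on older ones) and drop it from the leak tracker.
 */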
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 110)) || \
	defined(WAKEUP_SOURCE_DEV)
void __qdf_wake_lock_destroy(qdf_wake_lock_t *lock,
			     const char *func, uint32_t line)
{
	wakeup_source_unregister(lock->priv);
	qdf_wake_lock_dbg_untrack(lock, func, line);
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0))
void __qdf_wake_lock_destroy(qdf_wake_lock_t *lock,
			     const char *func, uint32_t line)
{
	wakeup_source_trash(&(lock->lock));
	qdf_wake_lock_dbg_untrack(lock, func, line);
}
#else
void __qdf_wake_lock_destroy(qdf_wake_lock_t *lock,
			     const char *func, uint32_t line)
{
}
#endif
qdf_export_symbol(__qdf_wake_lock_destroy);

void qdf_pm_system_wakeup(void)
{
	pm_system_wakeup();
}

qdf_export_symbol(qdf_pm_system_wakeup);

#ifdef FEATURE_RUNTIME_PM
/**
 * qdf_to_hif_convert_rtpm_id() - Convert QDF Runtime PM ID to HIF RTPM ID
 * @id: Client id
 *
 * Return: HIF Runtime PM ID of client
 */
static uint32_t qdf_to_hif_convert_rtpm_id(uint32_t id)
{
	switch (id) {
	case QDF_RTPM_ID_RESERVED:
		return HIF_RTPM_ID_RESERVED;
	case QDF_RTPM_ID_PM_QOS_NOTIFY:
		return HIF_RTPM_ID_PM_QOS_NOTIFY;
	case QDF_RTPM_ID_WIPHY_SUSPEND:
		return HIF_RTPM_ID_WIPHY_SUSPEND;
	default:
		return HIF_RTPM_ID_MAX;
	}
}

/**
 * qdf_to_hif_convert_rtpm_type() - Convert QDF Runtime PM call type to HIF
 * call type
 * @type: call type
 *
 * Return: HIF runtime PM call type
 */
static uint8_t qdf_to_hif_convert_rtpm_type(uint8_t type)
{
	switch (type) {
	case QDF_RTPM_GET:
		return HIF_RTPM_GET_ASYNC;
	case QDF_RTPM_GET_FORCE:
		return HIF_RTPM_GET_FORCE;
	case QDF_RTPM_GET_SYNC:
		return HIF_RTPM_GET_SYNC;
	case QDF_RTPM_GET_NORESUME:
		return HIF_RTPM_GET_NORESUME;
	case QDF_RTPM_PUT:
		return HIF_RTPM_PUT_ASYNC;
	case QDF_RTPM_PUT_SYNC_SUSPEND:
		return HIF_RTPM_PUT_SYNC_SUSPEND;
	case QDF_RTPM_PUT_NOIDLE:
		return HIF_RTPM_PUT_NOIDLE;
	default:
		return QDF_STATUS_E_NOSUPPORT;
	}
}

QDF_STATUS qdf_rtpm_register(uint32_t id, void (*hif_rpm_cbk)(void))
{
	return hif_rtpm_register(qdf_to_hif_convert_rtpm_id(id), hif_rpm_cbk);
}

qdf_export_symbol(qdf_rtpm_register);

QDF_STATUS qdf_rtpm_deregister(uint32_t id)
{
	return hif_rtpm_deregister(qdf_to_hif_convert_rtpm_id(id));
}

qdf_export_symbol(qdf_rtpm_deregister);

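/*
 * Typical runtime PM lock usage (illustrative sketch only; callers normally
 * go through the qdf_runtime_lock_init() wrapper in the header, which is
 * assumed to supply the lock name):
 *
 *	qdf_runtime_lock_t rtpm_lock;
 *
 *	__qdf_runtime_lock_init(&rtpm_lock, "my_feature");
 *	qdf_runtime_pm_prevent_suspend(&rtpm_lock);
 *	... work that must not race with runtime suspend ...
 *	qdf_runtime_pm_allow_suspend(&rtpm_lock);
 *	qdf_runtime_lock_deinit(&rtpm_lock);
 */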
QDF_STATUS __qdf_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
{
	return hif_runtime_lock_init(lock, name);
}

qdf_export_symbol(__qdf_runtime_lock_init);

void qdf_runtime_lock_deinit(qdf_runtime_lock_t *lock)
{
	hif_runtime_lock_deinit(lock->lock);
}
qdf_export_symbol(qdf_runtime_lock_deinit);

QDF_STATUS qdf_rtpm_get(uint8_t type, uint32_t id)
{
	return hif_rtpm_get(qdf_to_hif_convert_rtpm_type(type),
			    qdf_to_hif_convert_rtpm_id(id));
}

qdf_export_symbol(qdf_rtpm_get);

QDF_STATUS qdf_rtpm_put(uint8_t type, uint32_t id)
{
	return hif_rtpm_put(qdf_to_hif_convert_rtpm_type(type),
			    qdf_to_hif_convert_rtpm_id(id));
}

qdf_export_symbol(qdf_rtpm_put);

QDF_STATUS qdf_runtime_pm_prevent_suspend(qdf_runtime_lock_t *lock)
{
	return hif_pm_runtime_prevent_suspend(lock->lock);
}

qdf_export_symbol(qdf_runtime_pm_prevent_suspend);

QDF_STATUS qdf_runtime_pm_prevent_suspend_sync(qdf_runtime_lock_t *lock)
{
	return hif_pm_runtime_prevent_suspend_sync(lock->lock);
}

qdf_export_symbol(qdf_runtime_pm_prevent_suspend_sync);

QDF_STATUS qdf_runtime_pm_allow_suspend(qdf_runtime_lock_t *lock)
{
	return hif_pm_runtime_allow_suspend(lock->lock);
}

qdf_export_symbol(qdf_runtime_pm_allow_suspend);

QDF_STATUS qdf_rtpm_sync_resume(void)
{
	return hif_rtpm_sync_resume();
}
#endif

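/*
 * Thin wrappers around the kernel spinlock for callers that go through the
 * QDF abstraction; these take the plain (non-BH, non-IRQ) variants.
 */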
QDF_STATUS qdf_spinlock_acquire(qdf_spinlock_t *lock)
{
	spin_lock(&lock->lock.spinlock);
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_spinlock_acquire);

QDF_STATUS qdf_spinlock_release(qdf_spinlock_t *lock)
{
	spin_unlock(&lock->lock.spinlock);
	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_spinlock_release);

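/*
 * qdf_mutex_destroy() - destroy a QDF mutex. The lock must currently be
 * released; mutex_trylock() is used to verify this, the bookkeeping is
 * cleared and marked LOCK_DESTROYED, and the lock statistics are freed.
 * Must not be called from interrupt context.
 */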
QDF_STATUS qdf_mutex_destroy(qdf_mutex_t *lock)
{
	/* check for invalid pointer */
	if (!lock) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: NULL pointer passed in", __func__);
		return QDF_STATUS_E_FAULT;
	}

	if (LINUX_LOCK_COOKIE != lock->cookie) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: uninitialized lock", __func__);
		return QDF_STATUS_E_INVAL;
	}

	if (in_interrupt()) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s cannot be called from interrupt context!!!",
			  __func__);
		return QDF_STATUS_E_FAULT;
	}

	/* check if lock is released */
	if (!mutex_trylock(&lock->m_lock)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: lock is not released", __func__);
		return QDF_STATUS_E_BUSY;
	}
	lock->cookie = 0;
	lock->state = LOCK_DESTROYED;
	lock->process_id = 0;
	lock->refcount = 0;

	qdf_lock_stats_destroy(&lock->stats);
	mutex_unlock(&lock->m_lock);

	return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_mutex_destroy);

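/*
 * Lock statistics cookies: a small fixed pool used to remember where each
 * tracked lock was created (function and line), so that locks which are
 * never destroyed can be reported at deinit time. When the pool runs dry,
 * DUMMY_LOCK_COOKIE is stored instead and the lock is counted as untracked.
 */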
#if QDF_LOCK_STATS_LIST
struct qdf_lock_cookie {
	union {
		struct {
			struct lock_stats *stats;
			const char *func;
			int line;
		} cookie;
		struct {
			struct qdf_lock_cookie *next;
		} empty_node;
	} u;
};

#ifndef QDF_LOCK_STATS_LIST_SIZE
#define QDF_LOCK_STATS_LIST_SIZE 256
#endif

static qdf_spinlock_t qdf_lock_list_spinlock;
static struct qdf_lock_cookie lock_cookies[QDF_LOCK_STATS_LIST_SIZE];
static struct qdf_lock_cookie *lock_cookie_freelist;
static qdf_atomic_t lock_cookie_get_failures;
static qdf_atomic_t lock_cookie_untracked_num;
/* sentinel cookie value used when the cookie pool is exhausted */
#define DUMMY_LOCK_COOKIE 0xc00c1e

/**
 * qdf_is_lock_cookie() - check if memory is a valid lock cookie
 * @lock_cookie: lock cookie to check
 *
 * Return: true if the memory is within the range of the lock cookie
 * memory.
 */
static bool qdf_is_lock_cookie(struct qdf_lock_cookie *lock_cookie)
{
	return lock_cookie >= &lock_cookies[0] &&
		lock_cookie <= &lock_cookies[QDF_LOCK_STATS_LIST_SIZE - 1];
}

/**
 * qdf_is_lock_cookie_free() - check if the lock cookie is on the freelist
 * @lock_cookie: lock cookie to check
 *
 * Check that the next field of the lock cookie points to a lock cookie.
 * Currently this is only true if the cookie is on the freelist.
 *
 * Checking for the function and line being NULL and 0 should also have worked.
 *
 * Return: true if the cookie is free
 */
static bool qdf_is_lock_cookie_free(struct qdf_lock_cookie *lock_cookie)
{
	struct qdf_lock_cookie *tmp = lock_cookie->u.empty_node.next;

	return qdf_is_lock_cookie(tmp) || (!tmp);
}

static struct qdf_lock_cookie *qdf_get_lock_cookie(void)
{
	struct qdf_lock_cookie *lock_cookie;

	qdf_spin_lock_bh(&qdf_lock_list_spinlock);
	lock_cookie = lock_cookie_freelist;
	if (lock_cookie_freelist)
		lock_cookie_freelist = lock_cookie_freelist->u.empty_node.next;
	qdf_spin_unlock_bh(&qdf_lock_list_spinlock);
	return lock_cookie;
}

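/*
 * __qdf_put_lock_cookie() - return a cookie to the freelist. Callers must
 * hold qdf_lock_list_spinlock, except during qdf_lock_stats_init() where
 * the list spinlock has not been created yet.
 */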
static void __qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie)
{
	if (!qdf_is_lock_cookie(lock_cookie))
		QDF_BUG(0);

	lock_cookie->u.empty_node.next = lock_cookie_freelist;
	lock_cookie_freelist = lock_cookie;
}

static void qdf_put_lock_cookie(struct qdf_lock_cookie *lock_cookie)
{
	qdf_spin_lock_bh(&qdf_lock_list_spinlock);
	__qdf_put_lock_cookie(lock_cookie);
	qdf_spin_unlock_bh(&qdf_lock_list_spinlock);
}

void qdf_lock_stats_init(void)
{
	int i;

	for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++)
		__qdf_put_lock_cookie(&lock_cookies[i]);

	/* stats must be allocated for the spinlock before the cookie,
	 * otherwise this qdf_lock_list_spinlock wouldn't get initialized
	 * properly
	 */
	qdf_spinlock_create(&qdf_lock_list_spinlock);
	qdf_atomic_init(&lock_cookie_get_failures);
	qdf_atomic_init(&lock_cookie_untracked_num);
}

void qdf_lock_stats_deinit(void)
{
	int i;

	qdf_spinlock_destroy(&qdf_lock_list_spinlock);
	for (i = 0; i < QDF_LOCK_STATS_LIST_SIZE; i++) {
		if (!qdf_is_lock_cookie_free(&lock_cookies[i]))
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
				  "%s: lock_not_destroyed, fun: %s, line %d",
				  __func__, lock_cookies[i].u.cookie.func,
				  lock_cookies[i].u.cookie.line);
	}
	lock_cookie_freelist = NULL;
}

/* The cookies live in separately allocated memory so that if a lock's
 * memory is freed without running the deinitialization code, the cookie
 * list is not corrupted.
 */
void qdf_lock_stats_cookie_create(struct lock_stats *stats,
				  const char *func, int line)
{
	struct qdf_lock_cookie *cookie = qdf_get_lock_cookie();

	if (!cookie) {
		int count;

		qdf_atomic_inc(&lock_cookie_get_failures);
		count = qdf_atomic_inc_return(&lock_cookie_untracked_num);
		stats->cookie = (void *)DUMMY_LOCK_COOKIE;
		return;
	}

	stats->cookie = cookie;
	stats->cookie->u.cookie.stats = stats;
	stats->cookie->u.cookie.func = func;
	stats->cookie->u.cookie.line = line;
}

qdf_export_symbol(qdf_lock_stats_cookie_create);

void qdf_lock_stats_cookie_destroy(struct lock_stats *stats)
{
	struct qdf_lock_cookie *cookie = stats->cookie;

	if (!cookie) {
		QDF_DEBUG_PANIC("Lock destroyed twice or never created");
		return;
	}

	stats->cookie = NULL;
	if (cookie == (void *)DUMMY_LOCK_COOKIE) {
		qdf_atomic_dec(&lock_cookie_untracked_num);
		return;
	}

	cookie->u.cookie.stats = NULL;
	cookie->u.cookie.func = NULL;
	cookie->u.cookie.line = 0;

	qdf_put_lock_cookie(cookie);
}

qdf_export_symbol(qdf_lock_stats_cookie_destroy);
#endif