/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and free'd.
 */
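
/*
 * Illustrative usage sketch (not taken from any particular driver): a
 * "dependent" keeps a dev_pm_qos_request object alive for as long as its
 * constraint applies and only manipulates it through this API:
 *
 *	static struct dev_pm_qos_request my_req;
 *
 *	ret = dev_pm_qos_add_request(dev, &my_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 250);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	dev_pm_qos_update_request(&my_req, 100);
 *	...
 *	dev_pm_qos_remove_request(&my_req);
 *
 * The latency values are in microseconds and arbitrary here; see the
 * kerneldoc of the individual functions below for the return conventions.
 */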

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
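
/*
 * Illustrative usage sketch: platform or bus code can check whether all,
 * some or none of the requests for a device set a given flag, for example
 * PM_QOS_FLAG_NO_POWER_OFF from <linux/pm_qos.h>, and refuse to remove
 * power if at least one request sets it:
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *			!= PM_QOS_FLAGS_NONE)
 *		return false;
 */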

/**
 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_read_value(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_read_value(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 */
s32 dev_pm_qos_read_value(struct device *dev)
{
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	ret = __dev_pm_qos_read_value(dev);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
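
/*
 * Illustrative usage sketch: frameworks deciding whether a low-power state
 * is acceptable (e.g. a PM domain governor) compare the state's resume
 * latency against the effective constraint read here, roughly like this:
 *
 *	s32 limit = dev_pm_qos_read_value(dev);
 *
 *	if (state_exit_latency_us > limit)
 *		continue;
 *
 * where state_exit_latency_us is a hypothetical per-state figure and a
 * state violating the limit is skipped.
 */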

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}
	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
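
/*
 * Note on DEV_PM_QOS_FLAGS requests (illustrative sketch): per the comment
 * above, the caller is responsible for keeping the device out of
 * RPM_SUSPENDED around the call; one way to do that is:
 *
 *	pm_runtime_get_sync(dev);
 *	ret = dev_pm_qos_add_request(dev, &req, DEV_PM_QOS_FLAGS,
 *				     PM_QOS_FLAG_NO_POWER_OFF);
 *	pm_runtime_put(dev);
 *
 * which mirrors what dev_pm_qos_update_flags() below does internally.
 */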

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch(req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (!ret)
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
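
/*
 * Illustrative usage sketch: a "watcher" supplies a notifier_block whose
 * callback receives the new aggregate resume latency target as 'value':
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	ret = dev_pm_qos_add_notifier(dev, &my_qos_nb);
 *
 * my_qos_notify and my_qos_nb are hypothetical names; the chain registered
 * here is the resume latency one, so only DEV_PM_QOS_RESUME_LATENCY changes
 * are reported through it.
 */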

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (!IS_ERR_OR_NULL(dev->power.qos))
		retval = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							    notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
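
/*
 * Illustrative usage sketch: a device that depends on an ancestor being
 * responsive (e.g. one whose wakeup path goes through its controller)
 * constrains that ancestor rather than itself:
 *
 *	ret = dev_pm_qos_add_ancestor_request(dev, &req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 100);
 *
 * As the code above shows, DEV_PM_QOS_RESUME_LATENCY requests land on the
 * closest ancestor with power.ignore_children set, and
 * DEV_PM_QOS_LATENCY_TOLERANCE requests on the closest ancestor providing
 * a set_latency_tolerance() callback.
 */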

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch(type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
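
/*
 * Illustrative usage sketch: a driver wanting user space to be able to
 * restrict its resume latency pairs the expose/hide calls, typically in
 * its probe and remove paths:
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev, initial_limit_us);
 *	...
 *	dev_pm_qos_hide_latency_limit(dev);
 *
 * initial_limit_us is a hypothetical driver-chosen value in microseconds
 * (it must not be negative). If the device is unregistered first, the
 * request is dropped by dev_pm_qos_constraints_destroy() instead.
 */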

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
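
/*
 * Illustrative usage sketch: exposing the latency tolerance knob only makes
 * sense once the driver or its subsystem has installed a
 * power.set_latency_tolerance() callback, which is what forwards the
 * aggregate value to the hardware:
 *
 *	dev->power.set_latency_tolerance = my_set_latency_tolerance;
 *	ret = dev_pm_qos_expose_latency_tolerance(dev);
 *
 * my_set_latency_tolerance() is a hypothetical callback; it is invoked with
 * a negative value (PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT) when no specific
 * tolerance is requested.
 */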

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);