2024-05-10 ee930fffee469d076998274a2ca55e13dc1efb67
kernel/kernel/power/qos.c
@@ -1,30 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This module exposes the interface to kernel space for specifying
- * QoS dependencies. It provides infrastructure for registration of:
+ * Power Management Quality of Service (PM QoS) support base.
  *
- * Dependents on a QoS value : register requests
- * Watchers of QoS value : get notified when target QoS value changes
+ * Copyright (C) 2020 Intel Corporation
  *
- * This QoS design is best effort based. Dependents register their QoS needs.
- * Watchers register to keep track of the current QoS needs of the system.
+ * Authors:
+ *	Mark Gross <mgross@linux.intel.com>
+ *	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
- * There are 3 basic classes of QoS parameter: latency, timeout, throughput
- * each have defined units:
- * latency: usec
- * timeout: usec <-- currently not used.
- * throughput: kbs (kilo byte / sec)
+ * Provided here is an interface for specifying PM QoS dependencies. It allows
+ * entities depending on QoS constraints to register their requests which are
+ * aggregated as appropriate to produce effective constraints (target values)
+ * that can be monitored by entities needing to respect them, either by polling
+ * or through a built-in notification mechanism.
  *
- * There are lists of pm_qos_objects each one wrapping requests, notifiers
- *
- * User mode requests on a QOS parameter register themselves to the
- * subsystem by opening the device node /dev/... and writing there request to
- * the node. As long as the process holds a file handle open to the node the
- * client continues to be accounted for. Upon file release the usermode
- * request is removed and a new qos target is computed. This way when the
- * request that the application has is cleaned up when closes the file
- * pointer or exits the pm_qos_object will get an opportunity to clean up.
- *
- * Mark Gross <mgross@linux.intel.com>
+ * In addition to the basic functionality, more specific interfaces for managing
+ * global CPU latency QoS requests and frequency QoS requests are provided.
  */
 
 /*#define DEBUG*/
@@ -43,121 +34,32 @@
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
-#include <linux/irq.h>
-#include <linux/irqdesc.h>
 
 #include <linux/uaccess.h>
 #include <linux/export.h>
 #include <trace/events/power.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/power.h>
+
 
 /*
  * locking rule: all changes to constraints or notifiers lists
  * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
  * held, taken with _irqsave. One lock to rule them all
  */
-struct pm_qos_object {
-	struct pm_qos_constraints *constraints;
-	struct miscdevice pm_qos_power_miscdev;
-	char *name;
-};
-
 static DEFINE_SPINLOCK(pm_qos_lock);
 
-static struct pm_qos_object null_pm_qos;
-
-static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
-static struct pm_qos_constraints cpu_dma_constraints = {
-	.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
-	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
-	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
-				PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE },
-	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
-	.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
-	.type = PM_QOS_MIN,
-	.notifiers = &cpu_dma_lat_notifier,
-};
-static struct pm_qos_object cpu_dma_pm_qos = {
-	.constraints = &cpu_dma_constraints,
-	.name = "cpu_dma_latency",
-};
-
-static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
-static struct pm_qos_constraints network_lat_constraints = {
-	.list = PLIST_HEAD_INIT(network_lat_constraints.list),
-	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
-	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
-				PM_QOS_NETWORK_LAT_DEFAULT_VALUE },
-	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
-	.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
-	.type = PM_QOS_MIN,
-	.notifiers = &network_lat_notifier,
-};
-static struct pm_qos_object network_lat_pm_qos = {
-	.constraints = &network_lat_constraints,
-	.name = "network_latency",
-};
-
-static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
-static struct pm_qos_constraints network_tput_constraints = {
-	.list = PLIST_HEAD_INIT(network_tput_constraints.list),
-	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
-	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
-				PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE },
-	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
-	.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
-	.type = PM_QOS_MAX,
-	.notifiers = &network_throughput_notifier,
-};
-static struct pm_qos_object network_throughput_pm_qos = {
-	.constraints = &network_tput_constraints,
-	.name = "network_throughput",
-};
-
-
-static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
-static struct pm_qos_constraints memory_bw_constraints = {
-	.list = PLIST_HEAD_INIT(memory_bw_constraints.list),
-	.target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
-	.default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
-	.no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
-	.type = PM_QOS_SUM,
-	.notifiers = &memory_bandwidth_notifier,
-};
-static struct pm_qos_object memory_bandwidth_pm_qos = {
-	.constraints = &memory_bw_constraints,
-	.name = "memory_bandwidth",
-};
-
-
-static struct pm_qos_object *pm_qos_array[] = {
-	&null_pm_qos,
-	&cpu_dma_pm_qos,
-	&network_lat_pm_qos,
-	&network_throughput_pm_qos,
-	&memory_bandwidth_pm_qos,
-};
-
-static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
-		size_t count, loff_t *f_pos);
-static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
-		size_t count, loff_t *f_pos);
-static int pm_qos_power_open(struct inode *inode, struct file *filp);
-static int pm_qos_power_release(struct inode *inode, struct file *filp);
-
-static const struct file_operations pm_qos_power_fops = {
-	.write = pm_qos_power_write,
-	.read = pm_qos_power_read,
-	.open = pm_qos_power_open,
-	.release = pm_qos_power_release,
-	.llseek = noop_llseek,
-};
-
-/* unlocked internal variant */
-static inline int pm_qos_get_value(struct pm_qos_constraints *c)
+/**
+ * pm_qos_read_value - Return the current effective constraint value.
+ * @c: List of PM QoS constraint requests.
+ */
+s32 pm_qos_read_value(struct pm_qos_constraints *c)
 {
-	struct plist_node *node;
-	int total_value = 0;
+	return READ_ONCE(c->target_value);
+}
 
+static int pm_qos_get_value(struct pm_qos_constraints *c)
+{
 	if (plist_head_empty(&c->list))
 		return c->no_constraint_value;
 
@@ -168,163 +70,42 @@
 	case PM_QOS_MAX:
 		return plist_last(&c->list)->prio;
 
-	case PM_QOS_SUM:
-		plist_for_each(node, &c->list)
-			total_value += node->prio;
-
-		return total_value;
-
 	default:
-		/* runtime check for not using enum */
-		BUG();
+		WARN(1, "Unknown PM QoS type in %s\n", __func__);
 		return PM_QOS_DEFAULT_VALUE;
 	}
 }
 
-s32 pm_qos_read_value(struct pm_qos_constraints *c)
+static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
 {
-	return c->target_value;
-}
-
-static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
-{
-	c->target_value = value;
-}
-
-static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
-{
-	struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
-	struct pm_qos_constraints *c;
-	struct pm_qos_request *req;
-	char *type;
-	unsigned long flags;
-	int tot_reqs = 0;
-	int active_reqs = 0;
-
-	if (IS_ERR_OR_NULL(qos)) {
-		pr_err("%s: bad qos param!\n", __func__);
-		return -EINVAL;
-	}
-	c = qos->constraints;
-	if (IS_ERR_OR_NULL(c)) {
-		pr_err("%s: Bad constraints on qos?\n", __func__);
-		return -EINVAL;
-	}
-
-	/* Lock to ensure we have a snapshot */
-	spin_lock_irqsave(&pm_qos_lock, flags);
-	if (plist_head_empty(&c->list)) {
-		seq_puts(s, "Empty!\n");
-		goto out;
-	}
-
-	switch (c->type) {
-	case PM_QOS_MIN:
-		type = "Minimum";
-		break;
-	case PM_QOS_MAX:
-		type = "Maximum";
-		break;
-	case PM_QOS_SUM:
-		type = "Sum";
-		break;
-	default:
-		type = "Unknown";
-	}
-
-	plist_for_each_entry(req, &c->list, node) {
-		char *state = "Default";
-
-		if ((req->node).prio != c->default_value) {
-			active_reqs++;
-			state = "Active";
-		}
-		tot_reqs++;
-		seq_printf(s, "%d: %d: %s\n", tot_reqs,
-			   (req->node).prio, state);
-	}
-
-	seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
-		   type, pm_qos_get_value(c), active_reqs, tot_reqs);
-
-out:
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-	return 0;
-}
-
-static int pm_qos_dbg_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, pm_qos_dbg_show_requests,
-			   inode->i_private);
-}
-
-static const struct file_operations pm_qos_debug_fops = {
-	.open = pm_qos_dbg_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c,
-					     bool dev_req)
-{
-	struct pm_qos_request *req = NULL;
-	int cpu;
-	s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };
-
-	/*
-	 * pm_qos_set_value_for_cpus expects all c->list elements to be of type
-	 * pm_qos_request, however requests from device will contain elements
-	 * of type dev_pm_qos_request.
-	 * pm_qos_constraints.target_per_cpu can be accessed only for
-	 * constraints associated with one of the pm_qos_class and present in
-	 * pm_qos_array. Device requests are not associated with any of
-	 * pm_qos_class, therefore their target_per_cpu cannot be accessed. We
-	 * can safely skip updating target_per_cpu for device requests.
-	 */
-	if (dev_req)
-		return;
-
-	plist_for_each_entry(req, &c->list, node) {
-		for_each_cpu(cpu, &req->cpus_affine) {
-			switch (c->type) {
-			case PM_QOS_MIN:
-				if (qos_val[cpu] > req->node.prio)
-					qos_val[cpu] = req->node.prio;
-				break;
-			case PM_QOS_MAX:
-				if (req->node.prio > qos_val[cpu])
-					qos_val[cpu] = req->node.prio;
-				break;
-			default:
-				break;
-			}
-		}
-	}
-
-	for_each_possible_cpu(cpu)
-		c->target_per_cpu[cpu] = qos_val[cpu];
+	WRITE_ONCE(c->target_value, value);
 }
 
 /**
- * pm_qos_update_target - manages the constraints list and calls the notifiers
- * if needed
- * @c: constraints data struct
- * @node: request to add to the list, to update or to remove
- * @action: action to take on the constraints list
- * @value: value of the request to add or update
+ * pm_qos_update_target - Update a list of PM QoS constraint requests.
+ * @c: List of PM QoS requests.
+ * @node: Target list entry.
+ * @action: Action to carry out (add, update or remove).
+ * @value: New request value for the target list entry.
  *
- * This function returns 1 if the aggregated constraint value has changed, 0
- * otherwise.
+ * Update the given list of PM QoS constraint requests, @c, by carrying an
+ * @action involving the @node list entry and @value on it.
+ *
+ * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node
+ * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list, store
+ * @value in it and add it to the list again), and PM_QOS_REMOVE_REQ (remove
+ * @node from the list, ignore @value).
+ *
+ * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
  */
 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
-			 enum pm_qos_req_action action, int value, bool dev_req)
+			 enum pm_qos_req_action action, int value)
 {
-	unsigned long flags;
 	int prev_value, curr_value, new_value;
-	int ret;
+	unsigned long flags;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
+
 	prev_value = pm_qos_get_value(c);
 	if (value == PM_QOS_DEFAULT_VALUE)
 		new_value = c->default_value;
@@ -337,12 +118,11 @@
 		break;
 	case PM_QOS_UPDATE_REQ:
 		/*
-		 * to change the list, we atomically remove, reinit
-		 * with new value and add, then see if the extremal
-		 * changed
+		 * To change the list, atomically remove, reinit with new value
+		 * and add, then see if the aggregate has changed.
 		 */
 		plist_del(node, &c->list);
-		/* fall through */
+		fallthrough;
 	case PM_QOS_ADD_REQ:
 		plist_node_init(node, new_value);
 		plist_add(node, &c->list);
@@ -354,21 +134,18 @@
 
 	curr_value = pm_qos_get_value(c);
 	pm_qos_set_value(c, curr_value);
-	pm_qos_set_value_for_cpus(c, dev_req);
 
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	trace_pm_qos_update_target(action, prev_value, curr_value);
-	if (prev_value != curr_value) {
-		ret = 1;
-		if (c->notifiers)
-			blocking_notifier_call_chain(c->notifiers,
-						     (unsigned long)curr_value,
-						     NULL);
-	} else {
-		ret = 0;
-	}
-	return ret;
+
+	if (prev_value == curr_value)
+		return 0;
+
+	if (c->notifiers)
+		blocking_notifier_call_chain(c->notifiers, curr_value, NULL);
+
+	return 1;
 }
 
 /**
@@ -390,14 +167,12 @@
 
 /**
  * pm_qos_update_flags - Update a set of PM QoS flags.
- * @pqf: Set of flags to update.
+ * @pqf: Set of PM QoS flags to update.
  * @req: Request to add to the set, to modify, or to remove from the set.
  * @action: Action to take on the set.
  * @val: Value of the request to add or modify.
  *
- * Update the given set of PM QoS flags and call notifiers if the aggregate
- * value has changed. Returns 1 if the aggregate constraint value has changed,
- * 0 otherwise.
+ * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
  */
 bool pm_qos_update_flags(struct pm_qos_flags *pqf,
 			 struct pm_qos_flags_request *req,
@@ -416,7 +191,7 @@
 		break;
 	case PM_QOS_UPDATE_REQ:
 		pm_qos_flags_remove_req(pqf, req);
-		/* fall through */
+		fallthrough;
 	case PM_QOS_ADD_REQ:
 		req->flags = val;
 		INIT_LIST_HEAD(&req->node);
@@ -433,425 +208,180 @@
 	spin_unlock_irqrestore(&pm_qos_lock, irqflags);
 
 	trace_pm_qos_update_flags(action, prev_value, curr_value);
+
 	return prev_value != curr_value;
 }
 
+#ifdef CONFIG_CPU_IDLE
+/* Definitions related to the CPU latency QoS. */
+
+static struct pm_qos_constraints cpu_latency_constraints = {
+	.list = PLIST_HEAD_INIT(cpu_latency_constraints.list),
+	.target_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
+	.default_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
+	.no_constraint_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
+	.type = PM_QOS_MIN,
+};
+
 /**
- * pm_qos_request - returns current system wide qos expectation
- * @pm_qos_class: identification of which qos value is requested
- *
- * This function returns the current target value.
+ * cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit.
  */
-int pm_qos_request(int pm_qos_class)
+s32 cpu_latency_qos_limit(void)
 {
-	return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
-}
-EXPORT_SYMBOL_GPL(pm_qos_request);
-
-int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
-{
-	return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
-}
-EXPORT_SYMBOL(pm_qos_request_for_cpu);
-
-int pm_qos_request_active(struct pm_qos_request *req)
-{
-	return req->pm_qos_class != 0;
-}
-EXPORT_SYMBOL_GPL(pm_qos_request_active);
-
-int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
-{
-	unsigned long irqflags;
-	int cpu;
-	struct pm_qos_constraints *c = NULL;
-	int val;
-
-	spin_lock_irqsave(&pm_qos_lock, irqflags);
-	c = pm_qos_array[pm_qos_class]->constraints;
-	val = c->default_value;
-
-	for_each_cpu(cpu, mask) {
-		switch (c->type) {
-		case PM_QOS_MIN:
-			if (c->target_per_cpu[cpu] < val)
-				val = c->target_per_cpu[cpu];
-			break;
-		case PM_QOS_MAX:
-			if (c->target_per_cpu[cpu] > val)
-				val = c->target_per_cpu[cpu];
-			break;
-		default:
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&pm_qos_lock, irqflags);
-
-	return val;
-}
-EXPORT_SYMBOL(pm_qos_request_for_cpumask);
-
-static void __pm_qos_update_request(struct pm_qos_request *req,
-				    s32 new_value)
-{
-	trace_pm_qos_update_request(req->pm_qos_class, new_value);
-
-	if (new_value != req->node.prio)
-		pm_qos_update_target(
-			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value, false);
+	return pm_qos_read_value(&cpu_latency_constraints);
 }
 
 /**
- * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
- * @work: work struct for the delayed work (timeout)
+ * cpu_latency_qos_request_active - Check the given PM QoS request.
+ * @req: PM QoS request to check.
  *
- * This cancels the timeout request by falling back to the default at timeout.
+ * Return: 'true' if @req has been added to the CPU latency QoS list, 'false'
+ * otherwise.
  */
-static void pm_qos_work_fn(struct work_struct *work)
+bool cpu_latency_qos_request_active(struct pm_qos_request *req)
 {
-	struct pm_qos_request *req = container_of(to_delayed_work(work),
-						  struct pm_qos_request,
-						  work);
-
-	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+	return req->qos == &cpu_latency_constraints;
 }
+EXPORT_SYMBOL_GPL(cpu_latency_qos_request_active);
 
-#ifdef CONFIG_SMP
-static void pm_qos_irq_release(struct kref *ref)
+static void cpu_latency_qos_apply(struct pm_qos_request *req,
+				  enum pm_qos_req_action action, s32 value)
 {
-	unsigned long flags;
-	struct irq_affinity_notify *notify = container_of(ref,
-			struct irq_affinity_notify, kref);
-	struct pm_qos_request *req = container_of(notify,
-			struct pm_qos_request, irq_notify);
-	struct pm_qos_constraints *c =
-		pm_qos_array[req->pm_qos_class]->constraints;
-
-	spin_lock_irqsave(&pm_qos_lock, flags);
-	cpumask_setall(&req->cpus_affine);
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-
-	pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ,
-			     c->default_value, false);
+	int ret = pm_qos_update_target(req->qos, &req->node, action, value);
+	if (ret > 0)
+		wake_up_all_idle_cpus();
 }
-
-static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
-			      const cpumask_t *mask)
-{
-	unsigned long flags;
-	struct pm_qos_request *req = container_of(notify,
-			struct pm_qos_request, irq_notify);
-	struct pm_qos_constraints *c =
-		pm_qos_array[req->pm_qos_class]->constraints;
-
-	spin_lock_irqsave(&pm_qos_lock, flags);
-	cpumask_copy(&req->cpus_affine, mask);
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-
-	pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, req->node.prio,
-			     false);
-}
-#endif
 
 /**
- * pm_qos_add_request - inserts new qos request into the list
- * @req: pointer to a preallocated handle
- * @pm_qos_class: identifies which list of qos request to use
- * @value: defines the qos request
+ * cpu_latency_qos_add_request - Add new CPU latency QoS request.
+ * @req: Pointer to a preallocated handle.
+ * @value: Requested constraint value.
  *
- * This function inserts a new entry in the pm_qos_class list of requested qos
- * performance characteristics. It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters and initializes the pm_qos_request
- * handle. Caller needs to save this handle for later use in updates and
- * removal.
+ * Use @value to initialize the request handle pointed to by @req, insert it as
+ * a new entry to the CPU latency QoS list and recompute the effective QoS
+ * constraint for that list.
+ *
+ * Callers need to save the handle for later use in updates and removal of the
+ * QoS request represented by it.
  */
-
-void pm_qos_add_request(struct pm_qos_request *req,
-			int pm_qos_class, s32 value)
-{
-	if (!req) /*guard against callers passing in null */
-		return;
-
-	if (pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
-		return;
-	}
-
-	switch (req->type) {
-	case PM_QOS_REQ_AFFINE_CORES:
-		if (cpumask_empty(&req->cpus_affine)) {
-			req->type = PM_QOS_REQ_ALL_CORES;
-			cpumask_setall(&req->cpus_affine);
-			WARN(1, "Affine cores not set for request with affinity flag\n");
-		}
-		break;
-#ifdef CONFIG_SMP
-	case PM_QOS_REQ_AFFINE_IRQ:
-		if (irq_can_set_affinity(req->irq)) {
-			struct irq_desc *desc = irq_to_desc(req->irq);
-			struct cpumask *mask;
-
-			if (!desc)
-				return;
-
-			mask = desc->irq_data.common->affinity;
-
-			/* Get the current affinity */
-			cpumask_copy(&req->cpus_affine, mask);
-			req->irq_notify.irq = req->irq;
-			req->irq_notify.notify = pm_qos_irq_notify;
-			req->irq_notify.release = pm_qos_irq_release;
-
-		} else {
-			req->type = PM_QOS_REQ_ALL_CORES;
-			cpumask_setall(&req->cpus_affine);
-			WARN(1, "IRQ-%d not set for request with affinity flag\n",
-			     req->irq);
-		}
-		break;
-#endif
-	default:
-		WARN(1, "Unknown request type %d\n", req->type);
-		/* fall through */
-	case PM_QOS_REQ_ALL_CORES:
-		cpumask_setall(&req->cpus_affine);
-		break;
-	}
-
-	req->pm_qos_class = pm_qos_class;
-	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
-	trace_pm_qos_add_request(pm_qos_class, value);
-	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
-			     &req->node, PM_QOS_ADD_REQ, value, false);
-
-#ifdef CONFIG_SMP
-	if (req->type == PM_QOS_REQ_AFFINE_IRQ &&
-	    irq_can_set_affinity(req->irq)) {
-		int ret = 0;
-
-		ret = irq_set_affinity_notifier(req->irq,
-						&req->irq_notify);
-		if (ret) {
-			WARN(1, "IRQ affinity notify set failed\n");
-			req->type = PM_QOS_REQ_ALL_CORES;
-			cpumask_setall(&req->cpus_affine);
-			pm_qos_update_target(
-				pm_qos_array[pm_qos_class]->constraints,
-				&req->node, PM_QOS_UPDATE_REQ, value, false);
-		}
-	}
-#endif
-}
-EXPORT_SYMBOL_GPL(pm_qos_add_request);
-
-/**
- * pm_qos_update_request - modifies an existing qos request
- * @req : handle to list element holding a pm_qos request to use
- * @value: defines the qos request
- *
- * Updates an existing qos request for the pm_qos_class of parameters along
- * with updating the target pm_qos_class value.
- *
- * Attempts are made to make this code callable on hot code paths.
- */
-void pm_qos_update_request(struct pm_qos_request *req,
-			   s32 new_value)
-{
-	if (!req) /*guard against callers passing in null */
-		return;
-
-	if (!pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
-		return;
-	}
-
-	cancel_delayed_work_sync(&req->work);
-	__pm_qos_update_request(req, new_value);
-}
-EXPORT_SYMBOL_GPL(pm_qos_update_request);
-
-/**
- * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
- * @req : handle to list element holding a pm_qos request to use
- * @new_value: defines the temporal qos request
- * @timeout_us: the effective duration of this qos request in usecs.
- *
- * After timeout_us, this qos request is cancelled automatically.
- */
-void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
-				   unsigned long timeout_us)
+void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
 {
 	if (!req)
 		return;
-	if (WARN(!pm_qos_request_active(req),
-		 "%s called for unknown object.", __func__))
-		return;
 
-	cancel_delayed_work_sync(&req->work);
-
-	trace_pm_qos_update_request_timeout(req->pm_qos_class,
-					    new_value, timeout_us);
-	if (new_value != req->node.prio)
-		pm_qos_update_target(
-			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value, false);
-
-	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
-}
-EXPORT_SYMBOL_GPL(pm_qos_update_request_timeout);
-
-/**
- * pm_qos_remove_request - modifies an existing qos request
- * @req: handle to request list element
- *
- * Will remove pm qos request from the list of constraints and
- * recompute the current target value for the pm_qos_class. Call this
- * on slow code paths.
- */
-void pm_qos_remove_request(struct pm_qos_request *req)
-{
-	if (!req) /*guard against callers passing in null */
-		return;
-	/* silent return to keep pcm code cleaner */
-
-	if (!pm_qos_request_active(req)) {
-		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+	if (cpu_latency_qos_request_active(req)) {
+		WARN(1, KERN_ERR "%s called for already added request\n", __func__);
 		return;
 	}
 
-	cancel_delayed_work_sync(&req->work);
+	trace_pm_qos_add_request(value);
 
-	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
-	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
-			     &req->node, PM_QOS_REMOVE_REQ,
-			     PM_QOS_DEFAULT_VALUE, false);
+	req->qos = &cpu_latency_constraints;
+	cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value);
+}
+EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request);
+
+/**
+ * cpu_latency_qos_update_request - Modify existing CPU latency QoS request.
+ * @req : QoS request to update.
+ * @new_value: New requested constraint value.
+ *
+ * Use @new_value to update the QoS request represented by @req in the CPU
+ * latency QoS list along with updating the effective constraint value for that
+ * list.
+ */
+void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
+{
+	if (!req)
+		return;
+
+	if (!cpu_latency_qos_request_active(req)) {
+		WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
+		return;
+	}
+
+	trace_pm_qos_update_request(new_value);
+
+	if (new_value == req->node.prio)
+		return;
+
+	cpu_latency_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(cpu_latency_qos_update_request);
+
+/**
+ * cpu_latency_qos_remove_request - Remove existing CPU latency QoS request.
+ * @req: QoS request to remove.
+ *
+ * Remove the CPU latency QoS request represented by @req from the CPU latency
+ * QoS list along with updating the effective constraint value for that list.
+ */
+void cpu_latency_qos_remove_request(struct pm_qos_request *req)
+{
+	if (!req)
+		return;
+
+	if (!cpu_latency_qos_request_active(req)) {
+		WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
+		return;
+	}
+
+	trace_pm_qos_remove_request(PM_QOS_DEFAULT_VALUE);
+
+	cpu_latency_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 	memset(req, 0, sizeof(*req));
 }
-EXPORT_SYMBOL_GPL(pm_qos_remove_request);
+EXPORT_SYMBOL_GPL(cpu_latency_qos_remove_request);
 
-/**
- * pm_qos_add_notifier - sets notification entry for changes to target value
- * @pm_qos_class: identifies which qos target changes should be notified.
- * @notifier: notifier block managed by caller.
- *
- * will register the notifier into a notification chain that gets called
- * upon changes to the pm_qos_class target value.
- */
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
-{
-	int retval;
+/* User space interface to the CPU latency QoS via misc device. */
 
-	retval = blocking_notifier_chain_register(
-			pm_qos_array[pm_qos_class]->constraints->notifiers,
-			notifier);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
-
-/**
- * pm_qos_remove_notifier - deletes notification entry from chain.
- * @pm_qos_class: identifies which qos target changes are notified.
- * @notifier: notifier block to be removed.
- *
- * will remove the notifier from the notification chain that gets called
- * upon changes to the pm_qos_class target value.
- */
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
-{
-	int retval;
-
-	retval = blocking_notifier_chain_unregister(
-			pm_qos_array[pm_qos_class]->constraints->notifiers,
-			notifier);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
-
-/* User space interface to PM QoS classes via misc devices */
-static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
-{
-	qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
-	qos->pm_qos_power_miscdev.name = qos->name;
-	qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
-
-	if (d) {
-		(void)debugfs_create_file(qos->name, S_IRUGO, d,
-					  (void *)qos, &pm_qos_debug_fops);
-	}
-
-	return misc_register(&qos->pm_qos_power_miscdev);
-}
-
-static int find_pm_qos_object_by_minor(int minor)
-{
-	int pm_qos_class;
-
-	for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
-		pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
-		if (minor ==
-			pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
-			return pm_qos_class;
-	}
-	return -1;
-}
-
-static int pm_qos_power_open(struct inode *inode, struct file *filp)
-{
-	long pm_qos_class;
-
-	pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
-	if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
-		struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
-		if (!req)
-			return -ENOMEM;
-
-		pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
-		filp->private_data = req;
-
-		return 0;
-	}
-	return -EPERM;
-}
-
-static int pm_qos_power_release(struct inode *inode, struct file *filp)
+static int cpu_latency_qos_open(struct inode *inode, struct file *filp)
 {
 	struct pm_qos_request *req;
 
-	req = filp->private_data;
-	pm_qos_remove_request(req);
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE);
+	filp->private_data = req;
+
+	return 0;
+}
+
+static int cpu_latency_qos_release(struct inode *inode, struct file *filp)
+{
+	struct pm_qos_request *req = filp->private_data;
+
+	filp->private_data = NULL;
+
+	cpu_latency_qos_remove_request(req);
 	kfree(req);
 
 	return 0;
 }
 
-
-static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
-		size_t count, loff_t *f_pos)
+static ssize_t cpu_latency_qos_read(struct file *filp, char __user *buf,
+				    size_t count, loff_t *f_pos)
 {
-	s32 value;
-	unsigned long flags;
 	struct pm_qos_request *req = filp->private_data;
+	unsigned long flags;
+	s32 value;
 
-	if (!req)
-		return -EINVAL;
-	if (!pm_qos_request_active(req))
+	if (!req || !cpu_latency_qos_request_active(req))
 		return -EINVAL;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
-	value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
+	value = pm_qos_get_value(&cpu_latency_constraints);
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
 }
 
-static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
-		size_t count, loff_t *f_pos)
+static ssize_t cpu_latency_qos_write(struct file *filp, const char __user *buf,
+				     size_t count, loff_t *f_pos)
 {
 	s32 value;
-	struct pm_qos_request *req;
 
 	if (count == sizeof(s32)) {
 		if (copy_from_user(&value, buf, sizeof(s32)))
@@ -864,35 +394,286 @@
 			return ret;
 	}
 
-	req = filp->private_data;
-	pm_qos_update_request(req, value);
+	cpu_latency_qos_update_request(filp->private_data, value);
 
 	return count;
 }
 
+static const struct file_operations cpu_latency_qos_fops = {
+	.write = cpu_latency_qos_write,
+	.read = cpu_latency_qos_read,
+	.open = cpu_latency_qos_open,
+	.release = cpu_latency_qos_release,
+	.llseek = noop_llseek,
+};
 
-static int __init pm_qos_power_init(void)
+static struct miscdevice cpu_latency_qos_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "cpu_dma_latency",
+	.fops = &cpu_latency_qos_fops,
+};
+
+static int __init cpu_latency_qos_init(void)
 {
-	int ret = 0;
-	int i;
-	struct dentry *d;
+	int ret;
 
-	BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
+	ret = misc_register(&cpu_latency_qos_miscdev);
+	if (ret < 0)
+		pr_err("%s: %s setup failed\n", __func__,
+		       cpu_latency_qos_miscdev.name);
 
-	d = debugfs_create_dir("pm_qos", NULL);
-	if (IS_ERR_OR_NULL(d))
-		d = NULL;
+	return ret;
+}
+late_initcall(cpu_latency_qos_init);
+#endif /* CONFIG_CPU_IDLE */
 
-	for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
-		ret = register_pm_qos_misc(pm_qos_array[i], d);
-		if (ret < 0) {
-			pr_err("%s: %s setup failed\n",
			       __func__, pm_qos_array[i]->name);
-			return ret;
-		}
+/* Definitions related to the frequency QoS below. */
+
+/**
+ * freq_constraints_init - Initialize frequency QoS constraints.
+ * @qos: Frequency QoS constraints to initialize.
+ */
+void freq_constraints_init(struct freq_constraints *qos)
+{
+	struct pm_qos_constraints *c;
+
+	c = &qos->min_freq;
+	plist_head_init(&c->list);
+	c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+	c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+	c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+	c->type = PM_QOS_MAX;
+	c->notifiers = &qos->min_freq_notifiers;
+	BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+
+	c = &qos->max_freq;
+	plist_head_init(&c->list);
+	c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+	c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+	c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+	c->type = PM_QOS_MIN;
+	c->notifiers = &qos->max_freq_notifiers;
+	BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+}
+
+/**
+ * freq_qos_read_value - Get frequency QoS constraint for a given list.
+ * @qos: Constraints to evaluate.
+ * @type: QoS request type.
+ */
+s32 freq_qos_read_value(struct freq_constraints *qos,
+			enum freq_qos_req_type type)
+{
+	s32 ret;
+
+	switch (type) {
+	case FREQ_QOS_MIN:
+		ret = IS_ERR_OR_NULL(qos) ?
+			FREQ_QOS_MIN_DEFAULT_VALUE :
+			pm_qos_read_value(&qos->min_freq);
+		break;
+	case FREQ_QOS_MAX:
+		ret = IS_ERR_OR_NULL(qos) ?
+			FREQ_QOS_MAX_DEFAULT_VALUE :
+			pm_qos_read_value(&qos->max_freq);
+		break;
+	default:
+		WARN_ON(1);
+		ret = 0;
 	}
 
 	return ret;
 }
 
-late_initcall(pm_qos_power_init);
+/**
+ * freq_qos_apply - Add/modify/remove frequency QoS request.
+ * @req: Constraint request to apply.
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ *
+ * This is only meant to be called from inside pm_qos, not drivers.
+ */
+int freq_qos_apply(struct freq_qos_request *req,
+		   enum pm_qos_req_action action, s32 value)
+{
+	int ret;
+
+	switch(req->type) {
+	case FREQ_QOS_MIN:
+		ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
+					   action, value);
+		break;
+	case FREQ_QOS_MAX:
+		ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
+					   action, value);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * freq_qos_add_request - Insert new frequency QoS request into a given list.
+ * @qos: Constraints to update.
+ * @req: Preallocated request object.
+ * @type: Request type.
+ * @value: Request value.
+ *
+ * Insert a new entry into the @qos list of requests, recompute the effective
+ * QoS constraint value for that list and initialize the @req object. The
+ * caller needs to save that object for later use in updates and removal.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_add_request(struct freq_constraints *qos,
+			 struct freq_qos_request *req,
+			 enum freq_qos_req_type type, s32 value)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(qos) || !req)
+		return -EINVAL;
+
+	if (WARN(freq_qos_request_active(req),
+		 "%s() called for active request\n", __func__))
+		return -EINVAL;
+
+	req->qos = qos;
+	req->type = type;
+	ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
+	if (ret < 0) {
+		req->qos = NULL;
+		req->type = 0;
+	}
+
+	trace_android_vh_freq_qos_add_request(qos, req, type, value, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_request);
+
+/**
+ * freq_qos_update_request - Modify existing frequency QoS request.
+ * @req: Request to modify.
+ * @new_value: New request value.
+ *
+ * Update an existing frequency QoS request along with the effective constraint
+ * value for the list of requests it belongs to.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
+{
+	if (!req)
+		return -EINVAL;
+
+	if (WARN(!freq_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	trace_android_vh_freq_qos_update_request(req, new_value);
+	if (req->pnode.prio == new_value)
+		return 0;
+
+	return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(freq_qos_update_request);
+
+/**
+ * freq_qos_remove_request - Remove frequency QoS request from its list.
+ * @req: Request to remove.
+ *
+ * Remove the given frequency QoS request from the list of constraints it
+ * belongs to and recompute the effective constraint value for that list.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_remove_request(struct freq_qos_request *req)
+{
+	int ret;
+
+	if (!req)
+		return -EINVAL;
+
+	if (WARN(!freq_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	trace_android_vh_freq_qos_remove_request(req);
+	ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	req->qos = NULL;
+	req->type = 0;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_request);
+
+/**
+ * freq_qos_add_notifier - Add frequency QoS change notifier.
+ * @qos: List of requests to add the notifier to.
+ * @type: Request type.
+ * @notifier: Notifier block to add.
+ */
+int freq_qos_add_notifier(struct freq_constraints *qos,
+			  enum freq_qos_req_type type,
+			  struct notifier_block *notifier)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(qos) || !notifier)
+		return -EINVAL;
+
+	switch (type) {
+	case FREQ_QOS_MIN:
+		ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
+						       notifier);
+		break;
+	case FREQ_QOS_MAX:
+		ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
						       notifier);
+		break;
+	default:
+		WARN_ON(1);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
+
+/**
+ * freq_qos_remove_notifier - Remove frequency QoS change notifier.
+ * @qos: List of requests to remove the notifier from.
+ * @type: Request type.
+ * @notifier: Notifier block to remove.
+ */
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+			     enum freq_qos_req_type type,
+			     struct notifier_block *notifier)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(qos) || !notifier)
+		return -EINVAL;
+
+	switch (type) {
+	case FREQ_QOS_MIN:
+		ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
+							 notifier);
+		break;
+	case FREQ_QOS_MAX:
+		ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
+							 notifier);
+		break;
+	default:
+		WARN_ON(1);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
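Usage note (not part of the commit above): the diff replaces the old class-based pm_qos_* calls with dedicated CPU latency QoS and frequency QoS helpers. The following minimal sketch, kept outside the patch so it stays applicable, shows how a caller could use the new interfaces. The example_* names are hypothetical; CPU latency values are in microseconds, frequency values are in whatever units the owner of the struct freq_constraints uses (kHz for cpufreq, which passes the constraints embedded in its policy), and the freq_constraints pointer is assumed to be supplied by that owner.

/* Illustrative sketch only -- not part of the diff above. */
#include <linux/pm_qos.h>

static struct pm_qos_request example_lat_req;		/* hypothetical */
static struct freq_qos_request example_max_freq_req;	/* hypothetical */

/* Hold a 20 us CPU wakeup-latency constraint while a burst is in flight. */
static void example_start_burst(struct freq_constraints *fc)
{
	cpu_latency_qos_add_request(&example_lat_req, 20);

	/* Also cap the effective maximum frequency for this constraint set. */
	freq_qos_add_request(fc, &example_max_freq_req, FREQ_QOS_MAX, 1200000);
}

static void example_tighten(void)
{
	/* Updating recomputes the aggregate and notifies watchers if it changed. */
	cpu_latency_qos_update_request(&example_lat_req, 10);
}

static void example_end_burst(void)
{
	if (cpu_latency_qos_request_active(&example_lat_req))
		cpu_latency_qos_remove_request(&example_lat_req);

	freq_qos_remove_request(&example_max_freq_req);
}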