2024-05-16 8d2a02b24d66aa359e83eebc1ed3c0f85367a1cb
kernel/drivers/firmware/arm_scmi/clock.c
@@ -2,8 +2,11 @@
 /*
  * System Control and Management Interface (SCMI) Clock Protocol
  *
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2020 ARM Ltd.
  */
+
+#include <linux/module.h>
+#include <linux/sort.h>
 
 #include "common.h"
 
@@ -72,60 +75,73 @@
 	struct scmi_clock_info *clk;
 };
 
-static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
-					       struct clock_info *ci)
+static int
+scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
+				   struct clock_info *ci)
 {
 	int ret;
 	struct scmi_xfer *t;
 	struct scmi_msg_resp_clock_protocol_attributes *attr;
 
-	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
-				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
+	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
+				      0, sizeof(*attr), &t);
 	if (ret)
 		return ret;
 
 	attr = t->rx.buf;
 
-	ret = scmi_do_xfer(handle, t);
+	ret = ph->xops->do_xfer(ph, t);
 	if (!ret) {
 		ci->num_clocks = le16_to_cpu(attr->num_clocks);
 		ci->max_async_req = attr->max_async_req;
 	}
 
-	scmi_xfer_put(handle, t);
+	ph->xops->xfer_put(ph, t);
 	return ret;
 }
 
-static int scmi_clock_attributes_get(const struct scmi_handle *handle,
+static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
 				     u32 clk_id, struct scmi_clock_info *clk)
 {
 	int ret;
 	struct scmi_xfer *t;
 	struct scmi_msg_resp_clock_attributes *attr;
 
-	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
-				 sizeof(clk_id), sizeof(*attr), &t);
+	ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
+				      sizeof(clk_id), sizeof(*attr), &t);
 	if (ret)
 		return ret;
 
 	put_unaligned_le32(clk_id, t->tx.buf);
 	attr = t->rx.buf;
 
-	ret = scmi_do_xfer(handle, t);
+	ret = ph->xops->do_xfer(ph, t);
 	if (!ret)
 		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
 	else
 		clk->name[0] = '\0';
 
-	scmi_xfer_put(handle, t);
+	ph->xops->xfer_put(ph, t);
 	return ret;
 }
 
+static int rate_cmp_func(const void *_r1, const void *_r2)
+{
+	const u64 *r1 = _r1, *r2 = _r2;
+
+	if (*r1 < *r2)
+		return -1;
+	else if (*r1 == *r2)
+		return 0;
+	else
+		return 1;
+}
+
 static int
-scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
+scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
 			      struct scmi_clock_info *clk)
 {
-	u64 *rate;
+	u64 *rate = NULL;
 	int ret, cnt;
 	bool rate_discrete = false;
 	u32 tot_rate_cnt = 0, rates_flag;
@@ -134,8 +150,8 @@
 	struct scmi_msg_clock_describe_rates *clk_desc;
 	struct scmi_msg_resp_clock_describe_rates *rlist;
 
-	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
-				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
+	ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
+				      sizeof(*clk_desc), 0, &t);
 	if (ret)
 		return ret;
 
@@ -147,7 +163,7 @@
 		/* Set the number of rates to be skipped/already read */
 		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
 
-		ret = scmi_do_xfer(handle, t);
+		ret = ph->xops->do_xfer(ph, t);
 		if (ret)
 			goto err;
 
@@ -157,7 +173,7 @@
 		num_returned = NUM_RETURNED(rates_flag);
 
 		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
-			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
+			dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
 			break;
 		}
 
@@ -165,7 +181,7 @@
 			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
 			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
 			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
-			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
+			dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
 				clk->range.min_rate, clk->range.max_rate,
 				clk->range.step_size);
 			break;
@@ -174,58 +190,63 @@
 		rate = &clk->list.rates[tot_rate_cnt];
 		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
 			*rate = RATE_TO_U64(rlist->rate[cnt]);
-			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
+			dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
 		}
 
 		tot_rate_cnt += num_returned;
+
+		ph->xops->reset_rx_to_maxsz(ph, t);
 		/*
 		 * check for both returned and remaining to avoid infinite
 		 * loop due to buggy firmware
 		 */
 	} while (num_returned && num_remaining);
 
-	if (rate_discrete)
+	if (rate_discrete && rate) {
 		clk->list.num_rates = tot_rate_cnt;
+		sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
+		     rate_cmp_func, NULL);
+	}
 
 	clk->rate_discrete = rate_discrete;
 
 err:
-	scmi_xfer_put(handle, t);
+	ph->xops->xfer_put(ph, t);
 	return ret;
 }
 
 static int
-scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
+scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
+		    u32 clk_id, u64 *value)
 {
 	int ret;
 	struct scmi_xfer *t;
 
-	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
-				 sizeof(__le32), sizeof(u64), &t);
+	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
+				      sizeof(__le32), sizeof(u64), &t);
 	if (ret)
 		return ret;
 
 	put_unaligned_le32(clk_id, t->tx.buf);
 
-	ret = scmi_do_xfer(handle, t);
+	ret = ph->xops->do_xfer(ph, t);
 	if (!ret)
 		*value = get_unaligned_le64(t->rx.buf);
 
-	scmi_xfer_put(handle, t);
+	ph->xops->xfer_put(ph, t);
 	return ret;
 }
 
-static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
-			       u64 rate)
+static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
+			       u32 clk_id, u64 rate)
 {
 	int ret;
 	u32 flags = 0;
 	struct scmi_xfer *t;
 	struct scmi_clock_set_rate *cfg;
-	struct clock_info *ci = handle->clk_priv;
+	struct clock_info *ci = ph->get_priv(ph);
 
-	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
-				 sizeof(*cfg), 0, &t);
+	ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
 	if (ret)
 		return ret;
 
@@ -240,26 +261,27 @@
 	cfg->value_high = cpu_to_le32(rate >> 32);
 
 	if (flags & CLOCK_SET_ASYNC)
-		ret = scmi_do_xfer_with_response(handle, t);
+		ret = ph->xops->do_xfer_with_response(ph, t);
 	else
-		ret = scmi_do_xfer(handle, t);
+		ret = ph->xops->do_xfer(ph, t);
 
 	if (ci->max_async_req)
 		atomic_dec(&ci->cur_async_req);
 
-	scmi_xfer_put(handle, t);
+	ph->xops->xfer_put(ph, t);
 	return ret;
 }
 
 static int
-scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
+scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
+		      u32 config)
 {
 	int ret;
 	struct scmi_xfer *t;
 	struct scmi_clock_set_config *cfg;
 
-	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
-				 sizeof(*cfg), 0, &t);
+	ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
+				      sizeof(*cfg), 0, &t);
 	if (ret)
 		return ret;
 
@@ -267,33 +289,33 @@
 	cfg->id = cpu_to_le32(clk_id);
 	cfg->attributes = cpu_to_le32(config);
 
-	ret = scmi_do_xfer(handle, t);
+	ret = ph->xops->do_xfer(ph, t);
 
-	scmi_xfer_put(handle, t);
+	ph->xops->xfer_put(ph, t);
 	return ret;
 }
 
-static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
+static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id)
 {
-	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
+	return scmi_clock_config_set(ph, clk_id, CLOCK_ENABLE);
 }
 
-static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
+static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id)
 {
-	return scmi_clock_config_set(handle, clk_id, 0);
+	return scmi_clock_config_set(ph, clk_id, 0);
 }
 
-static int scmi_clock_count_get(const struct scmi_handle *handle)
+static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
 {
-	struct clock_info *ci = handle->clk_priv;
+	struct clock_info *ci = ph->get_priv(ph);
 
 	return ci->num_clocks;
 }
 
 static const struct scmi_clock_info *
-scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
+scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
 {
-	struct clock_info *ci = handle->clk_priv;
+	struct clock_info *ci = ph->get_priv(ph);
 	struct scmi_clock_info *clk = ci->clk + clk_id;
 
 	if (!clk->name[0])
@@ -302,7 +324,7 @@
 	return clk;
 }
 
-static struct scmi_clk_ops clk_ops = {
+static const struct scmi_clk_proto_ops clk_proto_ops = {
 	.count_get = scmi_clock_count_get,
 	.info_get = scmi_clock_info_get,
 	.rate_get = scmi_clock_rate_get,
@@ -311,24 +333,24 @@
 	.disable = scmi_clock_disable,
 };
 
-static int scmi_clock_protocol_init(struct scmi_handle *handle)
+static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
 {
 	u32 version;
 	int clkid, ret;
 	struct clock_info *cinfo;
 
-	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);
+	ph->xops->version_get(ph, &version);
 
-	dev_dbg(handle->dev, "Clock Version %d.%d\n",
+	dev_dbg(ph->dev, "Clock Version %d.%d\n",
 		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
 
-	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
+	cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
 	if (!cinfo)
 		return -ENOMEM;
 
-	scmi_clock_protocol_attributes_get(handle, cinfo);
+	scmi_clock_protocol_attributes_get(ph, cinfo);
 
-	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
+	cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
 				  sizeof(*cinfo->clk), GFP_KERNEL);
 	if (!cinfo->clk)
 		return -ENOMEM;
@@ -336,16 +358,20 @@
 	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
 		struct scmi_clock_info *clk = cinfo->clk + clkid;
 
-		ret = scmi_clock_attributes_get(handle, clkid, clk);
+		ret = scmi_clock_attributes_get(ph, clkid, clk);
 		if (!ret)
-			scmi_clock_describe_rates_get(handle, clkid, clk);
+			scmi_clock_describe_rates_get(ph, clkid, clk);
 	}
 
 	cinfo->version = version;
-	handle->clk_ops = &clk_ops;
-	handle->clk_priv = cinfo;
-
-	return 0;
+	return ph->set_priv(ph, cinfo);
 }
 
-DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(SCMI_PROTOCOL_CLOCK, clock)
+static const struct scmi_protocol scmi_clock = {
+	.id = SCMI_PROTOCOL_CLOCK,
+	.owner = THIS_MODULE,
+	.init_instance = &scmi_clock_protocol_init,
+	.ops = &clk_proto_ops,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)