2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/firmware/qcom_scm.c
@@ -1,18 +1,6 @@
-/*
- * Qualcomm SCM driver
- *
- * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
  * Copyright (C) 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
  */
 #include <linux/platform_device.h>
 #include <linux/init.h>
@@ -27,6 +15,7 @@
 #include <linux/of_platform.h>
 #include <linux/clk.h>
 #include <linux/reset-controller.h>
+#include <linux/arm-smccc.h>
 
 #include "qcom_scm.h"
 
@@ -58,6 +47,35 @@
 struct qcom_scm_mem_map_info {
 	__le64 mem_addr;
 	__le64 mem_size;
+};
+
+#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
+#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
+#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
+#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20
+
+#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
+#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
+#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
+#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40
+
+struct qcom_scm_wb_entry {
+	int flag;
+	void *entry;
+};
+
+static struct qcom_scm_wb_entry qcom_scm_wb[] = {
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
+	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
+};
+
+static const char *qcom_scm_convention_names[] = {
+	[SMC_CONVENTION_UNKNOWN] = "unknown",
+	[SMC_CONVENTION_ARM_32] = "smc arm 32",
+	[SMC_CONVENTION_ARM_64] = "smc arm 64",
+	[SMC_CONVENTION_LEGACY] = "smc legacy",
 };
 
 static struct qcom_scm *__scm;
@@ -95,19 +113,152 @@
 	clk_disable_unprepare(__scm->bus_clk);
 }
 
-/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
- */
-int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
+static DEFINE_SPINLOCK(scm_query_lock);
+
+static enum qcom_scm_convention __get_convention(void)
 {
-	return __qcom_scm_set_cold_boot_addr(entry, cpus);
+	unsigned long flags;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_INFO,
+		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
+					QCOM_SCM_INFO_IS_CALL_AVAIL) |
+			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
+		.arginfo = QCOM_SCM_ARGS(1),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	enum qcom_scm_convention probed_convention;
+	int ret;
+	bool forced = false;
+
+	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
+		return qcom_scm_convention;
+
+	/*
+	 * Device isn't required as there is only one argument - no device
+	 * needed to dma_map_single to secure world
+	 */
+	probed_convention = SMC_CONVENTION_ARM_64;
+	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+	if (!ret && res.result[0] == 1)
+		goto found;
+
+	/*
+	 * Some SC7180 firmwares didn't implement the
+	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
+	 * calling conventions on these firmwares. Luckily we don't make any
+	 * early calls into the firmware on these SoCs so the device pointer
+	 * will be valid here to check if the compatible matches.
+	 */
+	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
+		forced = true;
+		goto found;
+	}
+
+	probed_convention = SMC_CONVENTION_ARM_32;
+	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
+	if (!ret && res.result[0] == 1)
+		goto found;
+
+	probed_convention = SMC_CONVENTION_LEGACY;
+found:
+	spin_lock_irqsave(&scm_query_lock, flags);
+	if (probed_convention != qcom_scm_convention) {
+		qcom_scm_convention = probed_convention;
+		pr_info("qcom_scm: convention: %s%s\n",
+			qcom_scm_convention_names[qcom_scm_convention],
+			forced ? " (forced)" : "");
+	}
+	spin_unlock_irqrestore(&scm_query_lock, flags);
+
+	return qcom_scm_convention;
 }
-EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
+
+/**
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @dev:	device
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @desc:	Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ */
+static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
+			 struct qcom_scm_res *res)
+{
+	might_sleep();
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		return scm_smc_call(dev, desc, res, false);
+	case SMC_CONVENTION_LEGACY:
+		return scm_legacy_call(dev, desc, res);
+	default:
+		pr_err("Unknown current SCM calling convention.\n");
+		return -EINVAL;
+	}
+}
+
+/**
+ * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
+ * @dev:	device
+ * @svc_id:	service identifier
+ * @cmd_id:	command identifier
+ * @desc:	Descriptor structure containing arguments and return values
+ * @res:	Structure containing results from SMC/HVC call
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This can be called in atomic context.
+ */
+static int qcom_scm_call_atomic(struct device *dev,
+				const struct qcom_scm_desc *desc,
+				struct qcom_scm_res *res)
+{
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		return scm_smc_call(dev, desc, res, true);
+	case SMC_CONVENTION_LEGACY:
+		return scm_legacy_call_atomic(dev, desc, res);
+	default:
+		pr_err("Unknown current SCM calling convention.\n");
+		return -EINVAL;
+	}
+}
+
+static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
+					 u32 cmd_id)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_INFO,
+		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	desc.arginfo = QCOM_SCM_ARGS(1);
+	switch (__get_convention()) {
+	case SMC_CONVENTION_ARM_32:
+	case SMC_CONVENTION_ARM_64:
+		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
+				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
+		break;
+	case SMC_CONVENTION_LEGACY:
+		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
+		break;
+	default:
+		pr_err("Unknown SMC convention being used\n");
+		return false;
+	}
+
+	ret = qcom_scm_call(dev, &desc, &res);
+
+	return ret ? false : !!res.result[0];
+}
 
 /**
  * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
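Note: everything from here on follows one pattern. Each wrapper fills in a struct qcom_scm_desc and dispatches it through qcom_scm_call() (sleeping) or qcom_scm_call_atomic(), which route to the SMCCC or legacy backend chosen by __get_convention(). A minimal sketch of the shape the exported wrappers below take; the command ID here is made up for illustration, real IDs live in qcom_scm.h:

	static int example_scm_wrapper(u32 arg)
	{
		struct qcom_scm_desc desc = {
			.svc = QCOM_SCM_SVC_BOOT,	/* service the call belongs to */
			.cmd = 0x42,			/* hypothetical command ID */
			.arginfo = QCOM_SCM_ARGS(1),	/* one by-value argument */
			.args[0] = arg,
			.owner = ARM_SMCCC_OWNER_SIP,
		};
		struct qcom_scm_res res;
		int ret;

		ret = qcom_scm_call(__scm->dev, &desc, &res);

		/* a transport error wins; otherwise report the firmware status */
		return ret ? : res.result[0];
	}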
@@ -119,9 +270,83 @@
  */
 int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
 {
-	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
+	int ret;
+	int flags = 0;
+	int cpu;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_ADDR,
+		.arginfo = QCOM_SCM_ARGS(2),
+	};
+
+	/*
+	 * Reassign only if we are switching from hotplug entry point
+	 * to cpuidle entry point or vice versa.
+	 */
+	for_each_cpu(cpu, cpus) {
+		if (entry == qcom_scm_wb[cpu].entry)
+			continue;
+		flags |= qcom_scm_wb[cpu].flag;
+	}
+
+	/* No change in entry function */
+	if (!flags)
+		return 0;
+
+	desc.args[0] = flags;
+	desc.args[1] = virt_to_phys(entry);
+
+	ret = qcom_scm_call(__scm->dev, &desc, NULL);
+	if (!ret) {
+		for_each_cpu(cpu, cpus)
+			qcom_scm_wb[cpu].entry = entry;
+	}
+
+	return ret;
 }
 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
+
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+	int flags = 0;
+	int cpu;
+	int scm_cb_flags[] = {
+		QCOM_SCM_FLAG_COLDBOOT_CPU0,
+		QCOM_SCM_FLAG_COLDBOOT_CPU1,
+		QCOM_SCM_FLAG_COLDBOOT_CPU2,
+		QCOM_SCM_FLAG_COLDBOOT_CPU3,
+	};
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_ADDR,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	if (!cpus || (cpus && cpumask_empty(cpus)))
+		return -EINVAL;
+
+	for_each_cpu(cpu, cpus) {
+		if (cpu < ARRAY_SIZE(scm_cb_flags))
+			flags |= scm_cb_flags[cpu];
+		else
+			set_cpu_present(cpu, false);
+	}
+
+	desc.args[0] = flags;
+	desc.args[1] = virt_to_phys(entry);
+
+	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
 
 /**
  * qcom_scm_cpu_power_down() - Power down the cpu
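For context, the cold boot address is what ARM32 SMP bringup hands to firmware so secondary cores enter the kernel. A sketch of a caller, assuming the arch's secondary_startup_arm entry symbol; the hook name itself is invented:

	extern void secondary_startup_arm(void);

	static void example_smp_prepare_cpus(unsigned int max_cpus)
	{
		/* CPUs beyond the supported range get dropped from the present mask */
		if (qcom_scm_set_cold_boot_addr(secondary_startup_arm,
						cpu_present_mask))
			pr_warn("qcom_scm: failed to set cold boot address\n");
	}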
@@ -133,71 +358,73 @@
  */
 void qcom_scm_cpu_power_down(u32 flags)
 {
-	__qcom_scm_cpu_power_down(flags);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
+		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
 }
 EXPORT_SYMBOL(qcom_scm_cpu_power_down);
 
-/**
- * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
- *
- * Return true if HDCP is supported, false if not.
- */
-bool qcom_scm_hdcp_available(void)
+int qcom_scm_set_remote_state(u32 state, u32 id)
 {
-	int ret = qcom_scm_clk_enable();
-
-	if (ret)
-		return ret;
-
-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
-					   QCOM_SCM_CMD_HDCP);
-
-	qcom_scm_clk_disable();
-
-	return ret > 0 ? true : false;
-}
-EXPORT_SYMBOL(qcom_scm_hdcp_available);
-
-/**
- * qcom_scm_hdcp_req() - Send HDCP request.
- * @req: HDCP request array
- * @req_cnt: HDCP request array count
- * @resp: response buffer passed to SCM
- *
- * Write HDCP register(s) through SCM.
- */
-int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
-{
-	int ret = qcom_scm_clk_enable();
-
-	if (ret)
-		return ret;
-
-	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
-	qcom_scm_clk_disable();
-	return ret;
-}
-EXPORT_SYMBOL(qcom_scm_hdcp_req);
-
-/**
- * qcom_scm_pas_supported() - Check if the peripheral authentication service is
- *			      available for the given peripherial
- * @peripheral:	peripheral id
- *
- * Returns true if PAS is supported for this peripheral, otherwise false.
- */
-bool qcom_scm_pas_supported(u32 peripheral)
-{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = state,
+		.args[1] = id,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 	int ret;
 
-	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
-					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
-	if (ret <= 0)
-		return false;
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 
-	return __qcom_scm_pas_supported(__scm->dev, peripheral);
+	return ret ? : res.result[0];
 }
-EXPORT_SYMBOL(qcom_scm_pas_supported);
+EXPORT_SYMBOL(qcom_scm_set_remote_state);
+
+static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_BOOT,
+		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
+
+	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+
+static void qcom_scm_set_download_mode(bool enable)
+{
+	bool avail;
+	int ret = 0;
+
+	avail = __qcom_scm_is_call_available(__scm->dev,
+					     QCOM_SCM_SVC_BOOT,
+					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
+	if (avail) {
+		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
+	} else if (__scm->dload_mode_addr) {
+		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
+					 enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
+	} else {
+		dev_err(__scm->dev,
+			"No available mechanism for setting download mode\n");
+	}
+
+	if (ret)
+		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+}
 
 /**
  * qcom_scm_pas_init_image() - Initialize peripheral authentication service
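qcom_scm_set_remote_state() becomes a descriptor call here too; remote-processor drivers use it to hand state transitions to firmware. A hedged sketch of a caller, where both constants are placeholders rather than values from this commit:

	#define EXAMPLE_STATE_SHUTDOWN	0	/* placeholder state value */
	#define EXAMPLE_REMOTE_ID	0	/* placeholder processor id */

	static void example_stop_remote(void)
	{
		int ret = qcom_scm_set_remote_state(EXAMPLE_STATE_SHUTDOWN,
						    EXAMPLE_REMOTE_ID);

		if (ret)
			pr_err("remote state change failed: %d\n", ret);
	}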
@@ -216,6 +443,14 @@
 	dma_addr_t mdata_phys;
 	void *mdata_buf;
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
+		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	/*
 	 * During the scm call memory protection will be enabled for the meta
@@ -234,14 +469,16 @@
 	if (ret)
 		goto free_metadata;
 
-	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);
+	desc.args[1] = mdata_phys;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 
 	qcom_scm_clk_disable();
 
 free_metadata:
 	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_init_image);
 
@@ -257,15 +494,25 @@
 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
 {
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
+		.arginfo = QCOM_SCM_ARGS(3),
+		.args[0] = peripheral,
+		.args[1] = addr,
+		.args[2] = size,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	ret = qcom_scm_clk_enable();
 	if (ret)
 		return ret;
 
-	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 	qcom_scm_clk_disable();
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
 
@@ -279,15 +526,23 @@
 int qcom_scm_pas_auth_and_reset(u32 peripheral)
 {
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	ret = qcom_scm_clk_enable();
 	if (ret)
 		return ret;
 
-	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
 	qcom_scm_clk_disable();
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
 
@@ -300,17 +555,73 @@
 int qcom_scm_pas_shutdown(u32 peripheral)
 {
 	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
 
 	ret = qcom_scm_clk_enable();
 	if (ret)
 		return ret;
 
-	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
 	qcom_scm_clk_disable();
 
-	return ret;
+	return ret ? : res.result[0];
 }
 EXPORT_SYMBOL(qcom_scm_pas_shutdown);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ *			      available for the given peripherial
+ * @peripheral:	peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = peripheral,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
+					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
+		return false;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	return ret ? false : !!res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_PIL,
+		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = reset,
+		.args[1] = 0,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	return ret ? : res.result[0];
+}
 
 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
 				     unsigned long idx)
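Taken together, the PAS helpers above implement the load path remoteproc drivers drive. A sketch of the sequence, assuming firmware metadata has already been parsed into mdata/mdata_len and the carveout sits at mem_phys/mem_size; the function and parameter names are assumptions, not from this commit:

	static int example_pas_boot(u32 pas_id, const void *mdata, size_t mdata_len,
				    phys_addr_t mem_phys, size_t mem_size)
	{
		int ret;

		if (!qcom_scm_pas_supported(pas_id))
			return -EINVAL;

		ret = qcom_scm_pas_init_image(pas_id, mdata, mdata_len);
		if (ret)
			return ret;

		ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
		if (ret)
			goto err_shutdown;

		/* the caller copies the firmware segments into the carveout here */

		ret = qcom_scm_pas_auth_and_reset(pas_id);
		if (ret)
			goto err_shutdown;

		return 0;

	err_shutdown:
		qcom_scm_pas_shutdown(pas_id);
		return ret;
	}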
@@ -335,57 +646,500 @@
 	.deassert = qcom_scm_pas_reset_deassert,
 };
 
-int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
-{
-	return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
-}
-EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
-
-int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
-{
-	return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
-}
-EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
-
-int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
-{
-	return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
-}
-EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
-
 int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
 {
-	return __qcom_scm_io_readl(__scm->dev, addr, val);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_IO,
+		.cmd = QCOM_SCM_IO_READ,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = addr,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+
+	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
+	if (ret >= 0)
+		*val = res.result[0];
+
+	return ret < 0 ? ret : 0;
 }
 EXPORT_SYMBOL(qcom_scm_io_readl);
 
 int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
 {
-	return __qcom_scm_io_writel(__scm->dev, addr, val);
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_IO,
+		.cmd = QCOM_SCM_IO_WRITE,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = addr,
+		.args[1] = val,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
 }
 EXPORT_SYMBOL(qcom_scm_io_writel);
 
-static void qcom_scm_set_download_mode(bool enable)
+/**
+ * qcom_scm_restore_sec_cfg_available() - Check if secure environment
+ *					  supports restore security config interface.
+ *
+ * Return true if restore-cfg interface is supported, false if not.
+ */
+bool qcom_scm_restore_sec_cfg_available(void)
 {
-	bool avail;
-	int ret = 0;
+	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
+					    QCOM_SCM_MP_RESTORE_SEC_CFG);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
 
-	avail = __qcom_scm_is_call_available(__scm->dev,
-					     QCOM_SCM_SVC_BOOT,
-					     QCOM_SCM_SET_DLOAD_MODE);
-	if (avail) {
-		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
-	} else if (__scm->dload_mode_addr) {
-		ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
-					   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
-	} else {
-		dev_err(__scm->dev,
-			"No available mechanism for setting download mode\n");
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = device_id,
+		.args[1] = spare,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
+
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = spare,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+	int ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	if (size)
+		*size = res.result[0];
+
+	return ret ? : res.result[1];
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
+
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
+		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
+					 QCOM_SCM_VAL),
+		.args[0] = addr,
+		.args[1] = size,
+		.args[2] = spare,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	int ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+	/* the pg table has been initialized already, ignore the error */
+	if (ret == -EPERM)
+		ret = 0;
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+
+int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+				   u32 cp_nonpixel_start,
+				   u32 cp_nonpixel_size)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_VIDEO_VAR,
+		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
+					 QCOM_SCM_VAL, QCOM_SCM_VAL),
+		.args[0] = cp_start,
+		.args[1] = cp_size,
+		.args[2] = cp_nonpixel_start,
+		.args[3] = cp_nonpixel_size,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+
+	return ret ? : res.result[0];
+}
+EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
+
+static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
+				 size_t mem_sz, phys_addr_t src, size_t src_sz,
+				 phys_addr_t dest, size_t dest_sz)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_MP,
+		.cmd = QCOM_SCM_MP_ASSIGN,
+		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
+					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
+					 QCOM_SCM_VAL, QCOM_SCM_VAL),
+		.args[0] = mem_region,
+		.args[1] = mem_sz,
+		.args[2] = src,
+		.args[3] = src_sz,
+		.args[4] = dest,
+		.args[5] = dest_sz,
+		.args[6] = 0,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	ret = qcom_scm_call(dev, &desc, &res);
+
+	return ret ? : res.result[0];
+}
+
+/**
+ * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
+ * @mem_addr: mem region whose ownership need to be reassigned
+ * @mem_sz:   size of the region.
+ * @srcvm:    vmid for current set of owners, each set bit in
+ *            flag indicate a unique owner
+ * @newvm:    array having new owners and corresponding permission
+ *            flags
+ * @dest_cnt: number of owners in next set.
+ *
+ * Return negative errno on failure or 0 on success with @srcvm updated.
+ */
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+			unsigned int *srcvm,
+			const struct qcom_scm_vmperm *newvm,
+			unsigned int dest_cnt)
+{
+	struct qcom_scm_current_perm_info *destvm;
+	struct qcom_scm_mem_map_info *mem_to_map;
+	phys_addr_t mem_to_map_phys;
+	phys_addr_t dest_phys;
+	dma_addr_t ptr_phys;
+	size_t mem_to_map_sz;
+	size_t dest_sz;
+	size_t src_sz;
+	size_t ptr_sz;
+	int next_vm;
+	__le32 *src;
+	void *ptr;
+	int ret, i, b;
+	unsigned long srcvm_bits = *srcvm;
+
+	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
+	mem_to_map_sz = sizeof(*mem_to_map);
+	dest_sz = dest_cnt * sizeof(*destvm);
+	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
+			ALIGN(dest_sz, SZ_64);
+
+	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	/* Fill source vmid detail */
+	src = ptr;
+	i = 0;
+	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
+		src[i++] = cpu_to_le32(b);
+
+	/* Fill details of mem buff to map */
+	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
+	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
+	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
+	mem_to_map->mem_size = cpu_to_le64(mem_sz);
+
+	next_vm = 0;
+	/* Fill details of next vmid detail */
+	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
+	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
+		destvm->vmid = cpu_to_le32(newvm->vmid);
+		destvm->perm = cpu_to_le32(newvm->perm);
+		destvm->ctx = 0;
+		destvm->ctx_size = 0;
+		next_vm |= BIT(newvm->vmid);
 	}
 
-	if (ret)
-		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
+	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
+				    ptr_phys, src_sz, dest_phys, dest_sz);
+	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
+	if (ret) {
+		dev_err(__scm->dev,
+			"Assign memory protection call failed %d\n", ret);
+		return -EINVAL;
+	}
+
+	*srcvm = next_vm;
+	return 0;
 }
+EXPORT_SYMBOL(qcom_scm_assign_mem);
+
+/**
+ * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
+ */
+bool qcom_scm_ocmem_lock_available(void)
+{
+	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
+					    QCOM_SCM_OCMEM_LOCK_CMD);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
+
+/**
+ * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
+ * region to the specified initiator
+ *
+ * @id:     tz initiator id
+ * @offset: OCMEM offset
+ * @size:   OCMEM size
+ * @mode:   access mode (WIDE/NARROW)
+ */
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+			u32 mode)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_OCMEM,
+		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
+		.args[0] = id,
+		.args[1] = offset,
+		.args[2] = size,
+		.args[3] = mode,
+		.arginfo = QCOM_SCM_ARGS(4),
+	};
+
+	return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_lock);
+
+/**
+ * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
+ * region from the specified initiator
+ *
+ * @id:     tz initiator id
+ * @offset: OCMEM offset
+ * @size:   OCMEM size
+ */
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_OCMEM,
+		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
+		.args[0] = id,
+		.args[1] = offset,
+		.args[2] = size,
+		.arginfo = QCOM_SCM_ARGS(3),
+	};
+
+	return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
+
+/**
+ * qcom_scm_ice_available() - Is the ICE key programming interface available?
+ *
+ * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
+ *	   qcom_scm_ice_set_key() are available.
+ */
+bool qcom_scm_ice_available(void)
+{
+	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
+		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
+					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
+}
+EXPORT_SYMBOL(qcom_scm_ice_available);
+
+/**
+ * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
+ * @index: the keyslot to invalidate
+ *
+ * The UFSHCI and eMMC standards define a standard way to do this, but it
+ * doesn't work on these SoCs; only this SCM call does.
+ *
+ * It is assumed that the SoC has only one ICE instance being used, as this SCM
+ * call doesn't specify which ICE instance the keyslot belongs to.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_ice_invalidate_key(u32 index)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_ES,
+		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
+		.arginfo = QCOM_SCM_ARGS(1),
+		.args[0] = index,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+	return qcom_scm_call(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
+
+/**
+ * qcom_scm_ice_set_key() - Set an inline encryption key
+ * @index: the keyslot into which to set the key
+ * @key: the key to program
+ * @key_size: the size of the key in bytes
+ * @cipher: the encryption algorithm the key is for
+ * @data_unit_size: the encryption data unit size, i.e. the size of each
+ *		    individual plaintext and ciphertext.  Given in 512-byte
+ *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
+ *
+ * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
+ * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
+ *
+ * The UFSHCI and eMMC standards define a standard way to do this, but it
+ * doesn't work on these SoCs; only this SCM call does.
+ *
+ * It is assumed that the SoC has only one ICE instance being used, as this SCM
+ * call doesn't specify which ICE instance the keyslot belongs to.
+ *
+ * Return: 0 on success; -errno on failure.
+ */
+int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_ES,
+		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
+		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
+					 QCOM_SCM_VAL, QCOM_SCM_VAL,
+					 QCOM_SCM_VAL),
+		.args[0] = index,
+		.args[2] = key_size,
+		.args[3] = cipher,
+		.args[4] = data_unit_size,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	void *keybuf;
+	dma_addr_t key_phys;
+	int ret;
+
+	/*
+	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
+	 * physical address that's been properly flushed.  The sanctioned way to
+	 * do this is by using the DMA API.  But as is best practice for crypto
+	 * keys, we also must wipe the key after use.  This makes kmemdup() +
+	 * dma_map_single() not clearly correct, since the DMA API can use
+	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
+	 * keys is normally rare and thus not performance-critical.
+	 */
+
+	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
+				    GFP_KERNEL);
+	if (!keybuf)
+		return -ENOMEM;
+	memcpy(keybuf, key, key_size);
+	desc.args[1] = key_phys;
+
+	ret = qcom_scm_call(__scm->dev, &desc, NULL);
+
+	memzero_explicit(keybuf, key_size);
+
+	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_ice_set_key);
+
+/**
+ * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
+ *
+ * Return true if HDCP is supported, false if not.
+ */
+bool qcom_scm_hdcp_available(void)
+{
+	bool avail;
+	int ret = qcom_scm_clk_enable();
+
+	if (ret)
+		return ret;
+
+	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
+					     QCOM_SCM_HDCP_INVOKE);
+
+	qcom_scm_clk_disable();
+
+	return avail;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_available);
+
+/**
+ * qcom_scm_hdcp_req() - Send HDCP request.
+ * @req: HDCP request array
+ * @req_cnt: HDCP request array count
+ * @resp: response buffer passed to SCM
+ *
+ * Write HDCP register(s) through SCM.
+ */
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_HDCP,
+		.cmd = QCOM_SCM_HDCP_INVOKE,
+		.arginfo = QCOM_SCM_ARGS(10),
+		.args = {
+			req[0].addr,
+			req[0].val,
+			req[1].addr,
+			req[1].val,
+			req[2].addr,
+			req[2].val,
+			req[3].addr,
+			req[3].val,
+			req[4].addr,
+			req[4].val
+		},
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+	struct qcom_scm_res res;
+
+	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+		return -ERANGE;
+
+	ret = qcom_scm_clk_enable();
+	if (ret)
+		return ret;
+
+	ret = qcom_scm_call(__scm->dev, &desc, &res);
+	*resp = res.result[0];
+
+	qcom_scm_clk_disable();
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+{
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
+		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
+		.arginfo = QCOM_SCM_ARGS(2),
+		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
+		.args[1] = en,
+		.owner = ARM_SMCCC_OWNER_SIP,
+	};
+
+
+	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
+}
+EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
 
 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
 {
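qcom_scm_assign_mem() above is the one helper with in/out semantics: @srcvm carries the current owner bitmask in and receives the new one back, so the caller can hand the region back later. A sketch of a caller, using what I take to be the usual HLOS-to-remote handoff VMIDs from qcom_scm.h (the destination VM and helper names here are assumptions):

	/* current owners of the carveout; starts out owned by HLOS */
	static unsigned int example_perms = BIT(QCOM_SCM_VMID_HLOS);

	static int example_share_region(phys_addr_t addr, size_t size)
	{
		struct qcom_scm_vmperm newvm = {
			.vmid = QCOM_SCM_VMID_MSS_MSA,	/* assumed destination VM */
			.perm = QCOM_SCM_PERM_RW,
		};

		/* example_perms is updated in place on success */
		return qcom_scm_assign_mem(addr, size, &example_perms, &newvm, 1);
	}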
@@ -422,94 +1176,6 @@
 }
 EXPORT_SYMBOL(qcom_scm_is_available);
 
-int qcom_scm_set_remote_state(u32 state, u32 id)
-{
-	return __qcom_scm_set_remote_state(__scm->dev, state, id);
-}
-EXPORT_SYMBOL(qcom_scm_set_remote_state);
-
-/**
- * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
- * @mem_addr: mem region whose ownership need to be reassigned
- * @mem_sz:   size of the region.
- * @srcvm:    vmid for current set of owners, each set bit in
- *            flag indicate a unique owner
- * @newvm:    array having new owners and corrsponding permission
- *            flags
- * @dest_cnt: number of owners in next set.
- *
- * Return negative errno on failure, 0 on success, with @srcvm updated.
- */
-int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
-			unsigned int *srcvm,
-			struct qcom_scm_vmperm *newvm, int dest_cnt)
-{
-	struct qcom_scm_current_perm_info *destvm;
-	struct qcom_scm_mem_map_info *mem_to_map;
-	phys_addr_t mem_to_map_phys;
-	phys_addr_t dest_phys;
-	dma_addr_t ptr_phys;
-	size_t mem_to_map_sz;
-	size_t dest_sz;
-	size_t src_sz;
-	size_t ptr_sz;
-	int next_vm;
-	__le32 *src;
-	void *ptr;
-	int ret;
-	int len;
-	int i;
-
-	src_sz = hweight_long(*srcvm) * sizeof(*src);
-	mem_to_map_sz = sizeof(*mem_to_map);
-	dest_sz = dest_cnt * sizeof(*destvm);
-	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
-			ALIGN(dest_sz, SZ_64);
-
-	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
-	if (!ptr)
-		return -ENOMEM;
-
-	/* Fill source vmid detail */
-	src = ptr;
-	len = hweight_long(*srcvm);
-	for (i = 0; i < len; i++) {
-		src[i] = cpu_to_le32(ffs(*srcvm) - 1);
-		*srcvm ^= 1 << (ffs(*srcvm) - 1);
-	}
-
-	/* Fill details of mem buff to map */
-	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
-	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
-	mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
-	mem_to_map[0].mem_size = cpu_to_le64(mem_sz);
-
-	next_vm = 0;
-	/* Fill details of next vmid detail */
-	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
-	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
-	for (i = 0; i < dest_cnt; i++) {
-		destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
-		destvm[i].perm = cpu_to_le32(newvm[i].perm);
-		destvm[i].ctx = 0;
-		destvm[i].ctx_size = 0;
-		next_vm |= BIT(newvm[i].vmid);
-	}
-
-	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
-				    ptr_phys, src_sz, dest_phys, dest_sz);
-	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
-	if (ret) {
-		dev_err(__scm->dev,
-			"Assign memory protection call failed %d.\n", ret);
-		return -EINVAL;
-	}
-
-	*srcvm = next_vm;
-	return 0;
-}
-EXPORT_SYMBOL(qcom_scm_assign_mem);
-
 static int qcom_scm_probe(struct platform_device *pdev)
 {
 	struct qcom_scm *scm;
@@ -525,34 +1191,44 @@
 		return ret;
 
 	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
-	if (clks & SCM_HAS_CORE_CLK) {
-		scm->core_clk = devm_clk_get(&pdev->dev, "core");
-		if (IS_ERR(scm->core_clk)) {
-			if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
-				dev_err(&pdev->dev,
-					"failed to acquire core clk\n");
+
+	scm->core_clk = devm_clk_get(&pdev->dev, "core");
+	if (IS_ERR(scm->core_clk)) {
+		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
+			return PTR_ERR(scm->core_clk);
+
+		if (clks & SCM_HAS_CORE_CLK) {
+			dev_err(&pdev->dev, "failed to acquire core clk\n");
 			return PTR_ERR(scm->core_clk);
 		}
+
+		scm->core_clk = NULL;
 	}
 
-	if (clks & SCM_HAS_IFACE_CLK) {
-		scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
-		if (IS_ERR(scm->iface_clk)) {
-			if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
-				dev_err(&pdev->dev,
-					"failed to acquire iface clk\n");
+	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+	if (IS_ERR(scm->iface_clk)) {
+		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
+			return PTR_ERR(scm->iface_clk);
+
+		if (clks & SCM_HAS_IFACE_CLK) {
+			dev_err(&pdev->dev, "failed to acquire iface clk\n");
 			return PTR_ERR(scm->iface_clk);
 		}
+
+		scm->iface_clk = NULL;
 	}
 
-	if (clks & SCM_HAS_BUS_CLK) {
-		scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
-		if (IS_ERR(scm->bus_clk)) {
-			if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
-				dev_err(&pdev->dev,
-					"failed to acquire bus clk\n");
+	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+	if (IS_ERR(scm->bus_clk)) {
+		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
+			return PTR_ERR(scm->bus_clk);
+
+		if (clks & SCM_HAS_BUS_CLK) {
+			dev_err(&pdev->dev, "failed to acquire bus clk\n");
 			return PTR_ERR(scm->bus_clk);
 		}
+
+		scm->bus_clk = NULL;
 	}
 
 	scm->reset.ops = &qcom_scm_pas_reset_ops;
@@ -570,7 +1246,7 @@
 	__scm = scm;
 	__scm->dev = &pdev->dev;
 
-	__qcom_scm_init();
+	__get_convention();
 
 	/*
 	 * If requested enable "download mode", from this point on warmboot
@@ -586,33 +1262,34 @@
 static void qcom_scm_shutdown(struct platform_device *pdev)
 {
 	/* Clean shutdown, disable download mode to allow normal restart */
-	if (download_mode)
-		qcom_scm_set_download_mode(false);
+	qcom_scm_set_download_mode(false);
 }
 
 static const struct of_device_id qcom_scm_dt_match[] = {
 	{ .compatible = "qcom,scm-apq8064",
 	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
 	},
-	{ .compatible = "qcom,scm-msm8660",
-	  .data = (void *) SCM_HAS_CORE_CLK,
+	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
+							     SCM_HAS_IFACE_CLK |
+							     SCM_HAS_BUS_CLK)
 	},
-	{ .compatible = "qcom,scm-msm8960",
-	  .data = (void *) SCM_HAS_CORE_CLK,
+	{ .compatible = "qcom,scm-ipq4019" },
+	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
+	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
+	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
+							     SCM_HAS_IFACE_CLK |
+							     SCM_HAS_BUS_CLK)
 	},
-	{ .compatible = "qcom,scm-msm8996",
-	  .data = NULL, /* no clocks */
+	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
+							     SCM_HAS_IFACE_CLK |
+							     SCM_HAS_BUS_CLK)
 	},
-	{ .compatible = "qcom,scm-ipq4019",
-	  .data = NULL, /* no clocks */
-	},
-	{ .compatible = "qcom,scm",
-	  .data = (void *)(SCM_HAS_CORE_CLK
-			   | SCM_HAS_IFACE_CLK
-			   | SCM_HAS_BUS_CLK),
-	},
+	{ .compatible = "qcom,scm-msm8994" },
+	{ .compatible = "qcom,scm-msm8996" },
+	{ .compatible = "qcom,scm" },
 	{}
 };
+MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
 
 static struct platform_driver qcom_scm_driver = {
 	.driver = {
@@ -628,3 +1305,6 @@
 	return platform_driver_register(&qcom_scm_driver);
 }
 subsys_initcall(qcom_scm_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
+MODULE_LICENSE("GPL v2");