2024-02-19 1c055e55a242a33e574e48be530e06770a210dcd
kernel/drivers/firmware/arm_scmi/driver.c
@@ -11,24 +11,31 @@
  * various power domain DVFS including the core/cluster, certain system
  * clocks configuration, thermal sensors and many others.
  *
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2020 ARM Ltd.
  */
 
 #include <linux/bitmap.h>
+#include <linux/device.h>
 #include <linux/export.h>
+#include <linux/idr.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/ktime.h>
+#include <linux/list.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/processor.h>
+#include <linux/refcount.h>
 #include <linux/slab.h>
 
 #include "common.h"
+#include "notify.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/scmi.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/scmi.h>
 
 enum scmi_error_codes {
 	SCMI_SUCCESS = 0,	/* Success */
@@ -51,6 +58,14 @@
 /* Track the unique id for the transfers for debug & profiling purpose */
 static atomic_t transfer_last_id;
 
+static DEFINE_IDR(scmi_requested_devices);
+static DEFINE_MUTEX(scmi_requested_devices_mtx);
+
+struct scmi_requested_dev {
+	const struct scmi_device_id *id_table;
+	struct list_head node;
+};
+
 /**
  * struct scmi_xfers_info - Structure to manage transfer information
  *
@@ -67,6 +82,30 @@
 };
 
 /**
+ * struct scmi_protocol_instance - Describe an initialized protocol instance.
+ * @handle: Reference to the SCMI handle associated to this protocol instance.
+ * @proto: A reference to the protocol descriptor.
+ * @gid: A reference for per-protocol devres management.
+ * @users: A refcount to track effective users of this protocol.
+ * @priv: Reference for optional protocol private data.
+ * @ph: An embedded protocol handle that will be passed down to protocol
+ *	initialization code to identify this instance.
+ *
+ * Each protocol is initialized independently once for each SCMI platform in
+ * which is defined by DT and implemented by the SCMI server fw.
+ */
+struct scmi_protocol_instance {
+	const struct scmi_handle	*handle;
+	const struct scmi_protocol	*proto;
+	void				*gid;
+	refcount_t			users;
+	void				*priv;
+	struct scmi_protocol_handle	ph;
+};
+
+#define ph_to_pi(h)	container_of(h, struct scmi_protocol_instance, ph)
+
+/**
  * struct scmi_info - Structure representing a SCMI instance
  *
  * @dev: Device pointer
@@ -75,10 +114,18 @@
  * implementation version and (sub-)vendor identification.
  * @handle: Instance of SCMI handle to send to clients
  * @tx_minfo: Universal Transmit Message management info
+ * @rx_minfo: Universal Receive Message management info
  * @tx_idr: IDR object to map protocol id to Tx channel info pointer
  * @rx_idr: IDR object to map protocol id to Rx channel info pointer
+ * @protocols: IDR for protocols' instance descriptors initialized for
+ *	       this SCMI instance: populated on protocol's first attempted
+ *	       usage.
+ * @protocols_mtx: A mutex to protect protocols instances initialization.
  * @protocols_imp: List of protocols implemented, currently maximum of
  *		   MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ * @active_protocols: IDR storing device_nodes for protocols actually defined
+ *		      in the DT and confirmed as implemented by fw.
+ * @notify_priv: Pointer to private data structure specific to notifications.
  * @node: List head
  * @users: Number of users of this instance
  */
@@ -88,9 +135,15 @@
 	struct scmi_revision_info version;
 	struct scmi_handle handle;
 	struct scmi_xfers_info tx_minfo;
+	struct scmi_xfers_info rx_minfo;
 	struct idr tx_idr;
 	struct idr rx_idr;
+	struct idr protocols;
+	/* Ensure mutual exclusive access to protocols instance array */
+	struct mutex protocols_mtx;
 	u8 *protocols_imp;
+	struct idr active_protocols;
+	void *notify_priv;
 	struct list_head node;
 	int users;
 };
@@ -132,6 +185,25 @@
 {
 	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
 		hdr->id, hdr->seq, hdr->protocol_id);
+}
+
+void scmi_set_notification_instance_data(const struct scmi_handle *handle,
+					 void *priv)
+{
+	struct scmi_info *info = handle_to_scmi_info(handle);
+
+	info->notify_priv = priv;
+	/* Ensure updated protocol private date are visible */
+	smp_wmb();
+}
+
+void *scmi_get_notification_instance_data(const struct scmi_handle *handle)
+{
+	struct scmi_info *info = handle_to_scmi_info(handle);
+
+	/* Ensure protocols_private_data has been updated */
+	smp_rmb();
+	return info->notify_priv;
 }
 
 /**
@@ -201,6 +273,91 @@
201273 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
202274 }
203275
276
+static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
277
+{
278
+ struct scmi_xfer *xfer;
279
+ struct device *dev = cinfo->dev;
280
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
281
+ struct scmi_xfers_info *minfo = &info->rx_minfo;
282
+ ktime_t ts;
283
+
284
+ ts = ktime_get_boottime();
285
+ xfer = scmi_xfer_get(cinfo->handle, minfo);
286
+ if (IS_ERR(xfer)) {
287
+ dev_err(dev, "failed to get free message slot (%ld)\n",
288
+ PTR_ERR(xfer));
289
+ info->desc->ops->clear_channel(cinfo);
290
+ return;
291
+ }
292
+
293
+ unpack_scmi_header(msg_hdr, &xfer->hdr);
294
+ scmi_dump_header_dbg(dev, &xfer->hdr);
295
+ info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
296
+ xfer);
297
+ scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
298
+ xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
299
+
300
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
301
+ xfer->hdr.protocol_id, xfer->hdr.seq,
302
+ MSG_TYPE_NOTIFICATION);
303
+
304
+ __scmi_xfer_put(minfo, xfer);
305
+
306
+ info->desc->ops->clear_channel(cinfo);
307
+}
308
+
309
+static void scmi_handle_response(struct scmi_chan_info *cinfo,
310
+ u16 xfer_id, u8 msg_type)
311
+{
312
+ struct scmi_xfer *xfer;
313
+ struct device *dev = cinfo->dev;
314
+ struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
315
+ struct scmi_xfers_info *minfo = &info->tx_minfo;
316
+
317
+ /* Are we even expecting this? */
318
+ if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
319
+ dev_err(dev, "message for %d is not expected!\n", xfer_id);
320
+ info->desc->ops->clear_channel(cinfo);
321
+ return;
322
+ }
323
+
324
+ xfer = &minfo->xfer_block[xfer_id];
325
+ /*
326
+ * Even if a response was indeed expected on this slot at this point,
327
+ * a buggy platform could wrongly reply feeding us an unexpected
328
+ * delayed response we're not prepared to handle: bail-out safely
329
+ * blaming firmware.
330
+ */
331
+ if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
332
+ dev_err(dev,
333
+ "Delayed Response for %d not expected! Buggy F/W ?\n",
334
+ xfer_id);
335
+ info->desc->ops->clear_channel(cinfo);
336
+ /* It was unexpected, so nobody will clear the xfer if not us */
337
+ __scmi_xfer_put(minfo, xfer);
338
+ return;
339
+ }
340
+
341
+ /* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
342
+ if (msg_type == MSG_TYPE_DELAYED_RESP)
343
+ xfer->rx.len = info->desc->max_msg_size;
344
+
345
+ scmi_dump_header_dbg(dev, &xfer->hdr);
346
+
347
+ info->desc->ops->fetch_response(cinfo, xfer);
348
+
349
+ trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
350
+ xfer->hdr.protocol_id, xfer->hdr.seq,
351
+ msg_type);
352
+
353
+ if (msg_type == MSG_TYPE_DELAYED_RESP) {
354
+ info->desc->ops->clear_channel(cinfo);
355
+ complete(xfer->async_done);
356
+ } else {
357
+ complete(&xfer->done);
358
+ }
359
+}
360
+
204361 /**
205362 * scmi_rx_callback() - callback for receiving messages
206363 *
@@ -215,47 +372,34 @@
  */
 void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
 {
-	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
-	struct scmi_xfers_info *minfo = &info->tx_minfo;
 	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
 	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
-	struct device *dev = cinfo->dev;
-	struct scmi_xfer *xfer;
 
-	if (msg_type == MSG_TYPE_NOTIFICATION)
-		return; /* Notifications not yet supported */
-
-	/* Are we even expecting this? */
-	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
-		dev_err(dev, "message for %d is not expected!\n", xfer_id);
-		return;
+	switch (msg_type) {
+	case MSG_TYPE_NOTIFICATION:
+		scmi_handle_notification(cinfo, msg_hdr);
+		break;
+	case MSG_TYPE_COMMAND:
+	case MSG_TYPE_DELAYED_RESP:
+		scmi_handle_response(cinfo, xfer_id, msg_type);
+		break;
+	default:
+		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
+		break;
 	}
-
-	xfer = &minfo->xfer_block[xfer_id];
-
-	scmi_dump_header_dbg(dev, &xfer->hdr);
-
-	info->desc->ops->fetch_response(cinfo, xfer);
-
-	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
-			   xfer->hdr.protocol_id, xfer->hdr.seq,
-			   msg_type);
-
-	if (msg_type == MSG_TYPE_DELAYED_RESP)
-		complete(xfer->async_done);
-	else
-		complete(&xfer->done);
 }
 
 /**
- * scmi_xfer_put() - Release a transmit message
+ * xfer_put() - Release a transmit message
  *
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
  * @xfer: message that was reserved by scmi_xfer_get
  */
-void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static void xfer_put(const struct scmi_protocol_handle *ph,
+		     struct scmi_xfer *xfer)
 {
-	struct scmi_info *info = handle_to_scmi_info(handle);
+	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+	struct scmi_info *info = handle_to_scmi_info(pi->handle);
 
 	__scmi_xfer_put(&info->tx_minfo, xfer);
 }
@@ -272,22 +416,31 @@
 }
 
 /**
- * scmi_do_xfer() - Do one transfer
+ * do_xfer() - Do one transfer
  *
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
  * @xfer: Transfer to initiate and wait for response
  *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
  */
-int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
+static int do_xfer(const struct scmi_protocol_handle *ph,
+		   struct scmi_xfer *xfer)
 {
 	int ret;
 	int timeout;
-	struct scmi_info *info = handle_to_scmi_info(handle);
+	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+	struct scmi_info *info = handle_to_scmi_info(pi->handle);
 	struct device *dev = info->dev;
 	struct scmi_chan_info *cinfo;
+
+	/*
+	 * Re-instate protocol id here from protocol handle so that cannot be
+	 * overridden by mistake (or malice) by the protocol code mangling with
+	 * the scmi_xfer structure.
+	 */
+	xfer->hdr.protocol_id = pi->proto->id;
 
 	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
 	if (unlikely(!cinfo))
@@ -315,6 +468,7 @@
 	} else {
 		/* And we wait for the response. */
 		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
+		trace_android_vh_scmi_timeout_sync(&timeout);
 		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
 			dev_err(dev, "timed out in resp(caller: %pS)\n",
 				(void *)_RET_IP_);
@@ -329,46 +483,60 @@
 	info->desc->ops->mark_txdone(cinfo, ret);
 
 	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
-			    xfer->hdr.protocol_id, xfer->hdr.seq,
-			    xfer->hdr.status);
+			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);
 
 	return ret;
+}
+
+static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
+			      struct scmi_xfer *xfer)
+{
+	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+	struct scmi_info *info = handle_to_scmi_info(pi->handle);
+
+	xfer->rx.len = info->desc->max_msg_size;
 }
 
 #define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)
 
 /**
- * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
+ * do_xfer_with_response() - Do one transfer and wait until the delayed
  *	response is received
  *
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
  * @xfer: Transfer to initiate and wait for response
  *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
  */
-int scmi_do_xfer_with_response(const struct scmi_handle *handle,
-			       struct scmi_xfer *xfer)
+static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
+				 struct scmi_xfer *xfer)
 {
 	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
+	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
 	DECLARE_COMPLETION_ONSTACK(async_response);
+
+	xfer->hdr.protocol_id = pi->proto->id;
 
 	xfer->async_done = &async_response;
 
-	ret = scmi_do_xfer(handle, xfer);
-	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
-		ret = -ETIMEDOUT;
+	ret = do_xfer(ph, xfer);
+	if (!ret) {
+		if (!wait_for_completion_timeout(xfer->async_done, timeout))
+			ret = -ETIMEDOUT;
+		else if (xfer->hdr.status)
+			ret = scmi_to_linux_errno(xfer->hdr.status);
+	}
 
 	xfer->async_done = NULL;
 	return ret;
 }
 
 /**
- * scmi_xfer_get_init() - Allocate and initialise one message for transmit
+ * xfer_get_init() - Allocate and initialise one message for transmit
  *
- * @handle: Pointer to SCMI entity handle
+ * @ph: Pointer to SCMI protocol handle
  * @msg_id: Message identifier
- * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
@@ -379,12 +547,14 @@
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
  */
-int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
-		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
+static int xfer_get_init(const struct scmi_protocol_handle *ph,
+			 u8 msg_id, size_t tx_size, size_t rx_size,
+			 struct scmi_xfer **p)
 {
 	int ret;
 	struct scmi_xfer *xfer;
-	struct scmi_info *info = handle_to_scmi_info(handle);
+	const struct scmi_protocol_instance *pi = ph_to_pi(ph);
+	struct scmi_info *info = handle_to_scmi_info(pi->handle);
 	struct scmi_xfers_info *minfo = &info->tx_minfo;
 	struct device *dev = info->dev;
 
@@ -393,7 +563,7 @@
 	    tx_size > info->desc->max_msg_size)
 		return -ERANGE;
 
-	xfer = scmi_xfer_get(handle, minfo);
+	xfer = scmi_xfer_get(pi->handle, minfo);
 	if (IS_ERR(xfer)) {
 		ret = PTR_ERR(xfer);
 		dev_err(dev, "failed to get free message slot(%d)\n", ret);
@@ -403,7 +573,7 @@
 	xfer->tx.len = tx_size;
 	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
 	xfer->hdr.id = msg_id;
-	xfer->hdr.protocol_id = prot_id;
+	xfer->hdr.protocol_id = pi->proto->id;
 	xfer->hdr.poll_completion = false;
 
 	*p = xfer;
@@ -412,42 +582,237 @@
412582 }
413583
414584 /**
415
- * scmi_version_get() - command to get the revision of the SCMI entity
585
+ * version_get() - command to get the revision of the SCMI entity
416586 *
417
- * @handle: Pointer to SCMI entity handle
418
- * @protocol: Protocol identifier for the message
587
+ * @ph: Pointer to SCMI protocol handle
419588 * @version: Holds returned version of protocol.
420589 *
421590 * Updates the SCMI information in the internal data structure.
422591 *
423592 * Return: 0 if all went fine, else return appropriate error.
424593 */
425
-int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
426
- u32 *version)
594
+static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
427595 {
428596 int ret;
429597 __le32 *rev_info;
430598 struct scmi_xfer *t;
431599
432
- ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
433
- sizeof(*version), &t);
600
+ ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
434601 if (ret)
435602 return ret;
436603
437
- ret = scmi_do_xfer(handle, t);
604
+ ret = do_xfer(ph, t);
438605 if (!ret) {
439606 rev_info = t->rx.buf;
440607 *version = le32_to_cpu(*rev_info);
441608 }
442609
443
- scmi_xfer_put(handle, t);
610
+ xfer_put(ph, t);
444611 return ret;
445612 }
446613
447
-void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
448
- u8 *prot_imp)
614
+/**
615
+ * scmi_set_protocol_priv - Set protocol specific data at init time
616
+ *
617
+ * @ph: A reference to the protocol handle.
618
+ * @priv: The private data to set.
619
+ *
620
+ * Return: 0 on Success
621
+ */
622
+static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
623
+ void *priv)
624
+{
625
+ struct scmi_protocol_instance *pi = ph_to_pi(ph);
626
+
627
+ pi->priv = priv;
628
+
629
+ return 0;
630
+}
631
+
632
+/**
633
+ * scmi_get_protocol_priv - Set protocol specific data at init time
634
+ *
635
+ * @ph: A reference to the protocol handle.
636
+ *
637
+ * Return: Protocol private data if any was set.
638
+ */
639
+static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
640
+{
641
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
642
+
643
+ return pi->priv;
644
+}
645
+
646
+static const struct scmi_xfer_ops xfer_ops = {
647
+ .version_get = version_get,
648
+ .xfer_get_init = xfer_get_init,
649
+ .reset_rx_to_maxsz = reset_rx_to_maxsz,
650
+ .do_xfer = do_xfer,
651
+ .do_xfer_with_response = do_xfer_with_response,
652
+ .xfer_put = xfer_put,
653
+};
654
+
655
+/**
656
+ * scmi_get_revision_area - Retrieve version memory area.
657
+ *
658
+ * @ph: A reference to the protocol handle.
659
+ *
660
+ * A helper to grab the version memory area reference during SCMI Base protocol
661
+ * initialization.
662
+ *
663
+ * Return: A reference to the version memory area associated to the SCMI
664
+ * instance underlying this protocol handle.
665
+ */
666
+struct scmi_revision_info *
667
+scmi_get_revision_area(const struct scmi_protocol_handle *ph)
668
+{
669
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
670
+
671
+ return pi->handle->version;
672
+}
673
+
674
+/**
675
+ * scmi_get_protocol_instance - Protocol initialization helper.
676
+ * @handle: A reference to the SCMI platform instance.
677
+ * @protocol_id: The protocol being requested.
678
+ *
679
+ * In case the required protocol has never been requested before for this
680
+ * instance, allocate and initialize all the needed structures while handling
681
+ * resource allocation with a dedicated per-protocol devres subgroup.
682
+ *
683
+ * Return: A reference to an initialized protocol instance or error on failure.
684
+ */
685
+static struct scmi_protocol_instance * __must_check
686
+scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
687
+{
688
+ int ret = -ENOMEM;
689
+ void *gid;
690
+ struct scmi_protocol_instance *pi;
691
+ struct scmi_info *info = handle_to_scmi_info(handle);
692
+
693
+ mutex_lock(&info->protocols_mtx);
694
+ pi = idr_find(&info->protocols, protocol_id);
695
+
696
+ if (pi) {
697
+ refcount_inc(&pi->users);
698
+ } else {
699
+ const struct scmi_protocol *proto;
700
+
701
+ /* Fail if protocol not registered on bus */
702
+ proto = scmi_get_protocol(protocol_id);
703
+ if (!proto) {
704
+ ret = -EPROBE_DEFER;
705
+ goto out;
706
+ }
707
+
708
+ /* Protocol specific devres group */
709
+ gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
710
+ if (!gid)
711
+ goto out;
712
+
713
+ pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
714
+ if (!pi)
715
+ goto clean;
716
+
717
+ pi->gid = gid;
718
+ pi->proto = proto;
719
+ pi->handle = handle;
720
+ pi->ph.dev = handle->dev;
721
+ pi->ph.xops = &xfer_ops;
722
+ pi->ph.set_priv = scmi_set_protocol_priv;
723
+ pi->ph.get_priv = scmi_get_protocol_priv;
724
+ refcount_set(&pi->users, 1);
725
+ /* proto->init is assured NON NULL by scmi_protocol_register */
726
+ ret = pi->proto->init_instance(&pi->ph);
727
+ if (ret)
728
+ goto clean;
729
+
730
+ ret = idr_alloc(&info->protocols, pi,
731
+ protocol_id, protocol_id + 1, GFP_KERNEL);
732
+ if (ret != protocol_id)
733
+ goto clean;
734
+
735
+ if (pi->proto->events)
736
+ scmi_register_protocol_events(handle, pi->proto->id,
737
+ &pi->ph,
738
+ pi->proto->events);
739
+
740
+ devres_close_group(handle->dev, pi->gid);
741
+ dev_dbg(handle->dev, "Initialized protocol: 0x%X\n",
742
+ protocol_id);
743
+ }
744
+ mutex_unlock(&info->protocols_mtx);
745
+
746
+ return pi;
747
+
748
+clean:
749
+ scmi_put_protocol(protocol_id);
750
+ devres_release_group(handle->dev, gid);
751
+out:
752
+ mutex_unlock(&info->protocols_mtx);
753
+ return ERR_PTR(ret);
754
+}
755
+
756
+/**
757
+ * scmi_acquire_protocol - Protocol acquire
758
+ * @handle: A reference to the SCMI platform instance.
759
+ * @protocol_id: The protocol being requested.
760
+ *
761
+ * Register a new user for the requested protocol on the specified SCMI
762
+ * platform instance, possibly triggering its initialization on first user.
763
+ *
764
+ * Return: 0 if protocol was acquired successfully.
765
+ */
766
+int scmi_acquire_protocol(const struct scmi_handle *handle, u8 protocol_id)
767
+{
768
+ return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
769
+}
770
+
771
+/**
772
+ * scmi_release_protocol - Protocol de-initialization helper.
773
+ * @handle: A reference to the SCMI platform instance.
774
+ * @protocol_id: The protocol being requested.
775
+ *
776
+ * Remove one user for the specified protocol and triggers de-initialization
777
+ * and resources de-allocation once the last user has gone.
778
+ */
779
+void scmi_release_protocol(const struct scmi_handle *handle, u8 protocol_id)
449780 {
450781 struct scmi_info *info = handle_to_scmi_info(handle);
782
+ struct scmi_protocol_instance *pi;
783
+
784
+ mutex_lock(&info->protocols_mtx);
785
+ pi = idr_find(&info->protocols, protocol_id);
786
+ if (WARN_ON(!pi))
787
+ goto out;
788
+
789
+ if (refcount_dec_and_test(&pi->users)) {
790
+ void *gid = pi->gid;
791
+
792
+ if (pi->proto->events)
793
+ scmi_deregister_protocol_events(handle, protocol_id);
794
+
795
+ if (pi->proto->deinit_instance)
796
+ pi->proto->deinit_instance(&pi->ph);
797
+
798
+ idr_remove(&info->protocols, protocol_id);
799
+
800
+ scmi_put_protocol(protocol_id);
801
+
802
+ devres_release_group(handle->dev, gid);
803
+ dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
804
+ protocol_id);
805
+ }
806
+
807
+out:
808
+ mutex_unlock(&info->protocols_mtx);
809
+}
810
+
811
+void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
812
+ u8 *prot_imp)
813
+{
814
+ const struct scmi_protocol_instance *pi = ph_to_pi(ph);
815
+ struct scmi_info *info = handle_to_scmi_info(pi->handle);
451816
452817 info->protocols_imp = prot_imp;
453818 }
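Usage sketch (not part of the patch): a protocol implementation whose init_instance() callback receives the embedded scmi_protocol_handle is expected to drive all message exchanges through the scmi_xfer_ops published via ph->xops, exactly as version_get() above does through the static helpers. The message ID 0x1 and the single-u32 payload below are hypothetical; only the ph->xops calls and their signatures come from this patch.

/* Illustrative only -- not part of this patch. */
static int example_proto_attributes_get(const struct scmi_protocol_handle *ph,
					u32 *attributes)
{
	int ret;
	struct scmi_xfer *t;

	/* Hypothetical message id 0x1, no tx payload, one u32 of rx payload */
	ret = ph->xops->xfer_get_init(ph, 0x1, 0, sizeof(u32), &t);
	if (ret)
		return ret;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*attributes = le32_to_cpu(*(__le32 *)t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}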
@@ -465,6 +830,141 @@
465830 if (info->protocols_imp[i] == prot_id)
466831 return true;
467832 return false;
833
+}
834
+
835
+struct scmi_protocol_devres {
836
+ const struct scmi_handle *handle;
837
+ u8 protocol_id;
838
+};
839
+
840
+static void scmi_devm_release_protocol(struct device *dev, void *res)
841
+{
842
+ struct scmi_protocol_devres *dres = res;
843
+
844
+ scmi_release_protocol(dres->handle, dres->protocol_id);
845
+}
846
+
847
+static struct scmi_protocol_instance __must_check *
848
+__scmi_devres_get_protocol_instance(struct scmi_device *sdev, u8 protocol_id)
849
+{
850
+ struct scmi_protocol_devres *dres;
851
+ struct scmi_protocol_instance *pi;
852
+
853
+ dres = devres_alloc(scmi_devm_release_protocol,
854
+ sizeof(*dres), GFP_KERNEL);
855
+ if (!dres)
856
+ return ERR_PTR(-ENOMEM);
857
+
858
+ pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
859
+ if (IS_ERR(pi)) {
860
+ devres_free(dres);
861
+ return pi;
862
+ }
863
+
864
+ dres->handle = sdev->handle;
865
+ dres->protocol_id = protocol_id;
866
+ devres_add(&sdev->dev, dres);
867
+
868
+ return pi;
869
+}
870
+
871
+/**
872
+ * scmi_devm_get_protocol - Devres managed get protocol operations and handle
873
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
874
+ * be used for devres accounting.
875
+ * @protocol_id: The protocol being requested.
876
+ * @ph: A pointer reference used to pass back the associated protocol handle.
877
+ *
878
+ * Get hold of a protocol accounting for its usage, eventually triggering its
879
+ * initialization, and returning the protocol specific operations and related
880
+ * protocol handle which will be used as first argument in most of the
881
+ * protocols operations methods.
882
+ * Being a devres based managed method, protocol hold will be automatically
883
+ * released, and possibly de-initialized on last user, once the SCMI driver
884
+ * owning the scmi_device is unbound from it.
885
+ *
886
+ * Return: A reference to the requested protocol operations or error.
887
+ * Must be checked for errors by caller.
888
+ */
889
+static const void __must_check *
890
+scmi_devm_get_protocol(struct scmi_device *sdev, u8 protocol_id,
891
+ struct scmi_protocol_handle **ph)
892
+{
893
+ struct scmi_protocol_instance *pi;
894
+
895
+ if (!ph)
896
+ return ERR_PTR(-EINVAL);
897
+
898
+ pi = __scmi_devres_get_protocol_instance(sdev, protocol_id);
899
+ if (IS_ERR(pi))
900
+ return pi;
901
+
902
+ *ph = &pi->ph;
903
+
904
+ return pi->proto->ops;
905
+}
906
+
907
+/**
908
+ * scmi_devm_acquire_protocol - Devres managed helper to get hold of a protocol
909
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
910
+ * be used for devres accounting.
911
+ * @protocol_id: The protocol being requested.
912
+ *
913
+ * Get hold of a protocol accounting for its usage, possibly triggering its
914
+ * initialization but without getting access to its protocol specific operations
915
+ * and handle.
916
+ *
917
+ * Being a devres based managed method, protocol hold will be automatically
918
+ * released, and possibly de-initialized on last user, once the SCMI driver
919
+ * owning the scmi_device is unbound from it.
920
+ *
921
+ * Return: 0 on SUCCESS
922
+ */
923
+static int __must_check scmi_devm_acquire_protocol(struct scmi_device *sdev,
924
+ u8 protocol_id)
925
+{
926
+ struct scmi_protocol_instance *pi;
927
+
928
+ pi = __scmi_devres_get_protocol_instance(sdev, protocol_id);
929
+ if (IS_ERR(pi))
930
+ return PTR_ERR(pi);
931
+
932
+ return 0;
933
+}
934
+
935
+static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
936
+{
937
+ struct scmi_protocol_devres *dres = res;
938
+
939
+ if (WARN_ON(!dres || !data))
940
+ return 0;
941
+
942
+ return dres->protocol_id == *((u8 *)data);
943
+}
944
+
945
+/**
946
+ * scmi_devm_put_protocol - Devres managed put protocol operations and handle
947
+ * @sdev: A reference to an scmi_device whose embedded struct device is to
948
+ * be used for devres accounting.
949
+ * @protocol_id: The protocol being requested.
950
+ *
951
+ * Explicitly release a protocol hold previously obtained calling the above
952
+ * @scmi_devm_get_protocol_ops.
953
+ */
954
+static void scmi_devm_put_protocol(struct scmi_device *sdev, u8 protocol_id)
955
+{
956
+ int ret;
957
+
958
+ ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
959
+ scmi_devm_protocol_match, &protocol_id);
960
+ WARN_ON(ret);
961
+}
962
+
963
+static inline
964
+struct scmi_handle *scmi_handle_get_from_info(struct scmi_info *info)
965
+{
966
+ info->users++;
967
+ return &info->handle;
468968 }
469969
470970 /**
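Usage sketch (not part of the patch): an SCMI driver bound to an scmi_device can take a devres-managed hold on a protocol through the new handle operations installed in scmi_probe() below; the hold is released automatically when the driver is unbound. SCMI_PROTOCOL_CLOCK is only an example protocol id, and the concrete type behind the returned ops pointer is defined by the protocol implementation, not by this file.

/* Illustrative only -- not part of this patch. */
static int example_scmi_driver_probe(struct scmi_device *sdev)
{
	const struct scmi_handle *handle = sdev->handle;
	struct scmi_protocol_handle *ph;
	const void *ops;

	if (!handle)
		return -ENODEV;

	/* Gets (and, on first user, initializes) the protocol; devres releases it */
	ops = handle->devm_get_protocol(sdev, SCMI_PROTOCOL_CLOCK, &ph);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* 'ops' and 'ph' are then handed to the protocol-specific calls */
	return 0;
}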
@@ -488,8 +988,7 @@
 	list_for_each(p, &scmi_list) {
 		info = list_entry(p, struct scmi_info, node);
 		if (dev->parent == info->dev) {
-			handle = &info->handle;
-			info->users++;
+			handle = scmi_handle_get_from_info(info);
 			break;
 		}
 	}
@@ -526,13 +1025,13 @@
 	return 0;
 }
 
-static int scmi_xfer_info_init(struct scmi_info *sinfo)
+static int __scmi_xfer_info_init(struct scmi_info *sinfo,
+				 struct scmi_xfers_info *info)
 {
 	int i;
 	struct scmi_xfer *xfer;
 	struct device *dev = sinfo->dev;
 	const struct scmi_desc *desc = sinfo->desc;
-	struct scmi_xfers_info *info = &sinfo->tx_minfo;
 
 	/* Pre-allocated messages, no more than what hdr.seq can support */
 	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
@@ -566,6 +1065,16 @@
 	spin_lock_init(&info->xfer_lock);
 
 	return 0;
+}
+
+static int scmi_xfer_info_init(struct scmi_info *sinfo)
+{
+	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
+
+	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
+		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
+
+	return ret;
 }
 
 static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
@@ -617,10 +1126,59 @@
6171126 {
6181127 int ret = scmi_chan_setup(info, dev, prot_id, true);
6191128
620
- if (!ret) /* Rx is optional, hence no error check */
621
- scmi_chan_setup(info, dev, prot_id, false);
1129
+ if (!ret) {
1130
+ /* Rx is optional, report only memory errors */
1131
+ ret = scmi_chan_setup(info, dev, prot_id, false);
1132
+ if (ret && ret != -ENOMEM)
1133
+ ret = 0;
1134
+ }
6221135
6231136 return ret;
1137
+}
1138
+
1139
+/**
1140
+ * scmi_get_protocol_device - Helper to get/create an SCMI device.
1141
+ *
1142
+ * @np: A device node representing a valid active protocols for the referred
1143
+ * SCMI instance.
1144
+ * @info: The referred SCMI instance for which we are getting/creating this
1145
+ * device.
1146
+ * @prot_id: The protocol ID.
1147
+ * @name: The device name.
1148
+ *
1149
+ * Referring to the specific SCMI instance identified by @info, this helper
1150
+ * takes care to return a properly initialized device matching the requested
1151
+ * @proto_id and @name: if device was still not existent it is created as a
1152
+ * child of the specified SCMI instance @info and its transport properly
1153
+ * initialized as usual.
1154
+ */
1155
+static inline struct scmi_device *
1156
+scmi_get_protocol_device(struct device_node *np, struct scmi_info *info,
1157
+ int prot_id, const char *name)
1158
+{
1159
+ struct scmi_device *sdev;
1160
+
1161
+ /* Already created for this parent SCMI instance ? */
1162
+ sdev = scmi_find_child_dev(info->dev, prot_id, name);
1163
+ if (sdev)
1164
+ return sdev;
1165
+
1166
+ pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id);
1167
+
1168
+ sdev = scmi_device_create(np, info->dev, prot_id, name);
1169
+ if (!sdev) {
1170
+ dev_err(info->dev, "failed to create %d protocol device\n",
1171
+ prot_id);
1172
+ return NULL;
1173
+ }
1174
+
1175
+ if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
1176
+ dev_err(&sdev->dev, "failed to setup transport\n");
1177
+ scmi_device_destroy(sdev);
1178
+ return NULL;
1179
+ }
1180
+
1181
+ return sdev;
6241182 }
6251183
6261184 static inline void
@@ -629,55 +1187,231 @@
6291187 {
6301188 struct scmi_device *sdev;
6311189
632
- sdev = scmi_device_create(np, info->dev, prot_id, name);
633
- if (!sdev) {
634
- dev_err(info->dev, "failed to create %d protocol device\n",
635
- prot_id);
1190
+ sdev = scmi_get_protocol_device(np, info, prot_id, name);
1191
+ if (!sdev)
6361192 return;
637
- }
638
-
639
- if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
640
- dev_err(&sdev->dev, "failed to setup transport\n");
641
- scmi_device_destroy(sdev);
642
- return;
643
- }
6441193
6451194 /* setup handle now as the transport is ready */
6461195 scmi_set_handle(sdev);
6471196 }
6481197
649
-#define MAX_SCMI_DEV_PER_PROTOCOL 2
650
-struct scmi_prot_devnames {
651
- int protocol_id;
652
- char *names[MAX_SCMI_DEV_PER_PROTOCOL];
653
-};
654
-
655
-static struct scmi_prot_devnames devnames[] = {
656
- { SCMI_PROTOCOL_POWER, { "genpd" },},
657
- { SCMI_PROTOCOL_PERF, { "cpufreq" },},
658
- { SCMI_PROTOCOL_CLOCK, { "clocks" },},
659
- { SCMI_PROTOCOL_SENSOR, { "hwmon" },},
660
- { SCMI_PROTOCOL_RESET, { "reset" },},
661
-};
662
-
663
-static inline void
664
-scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
665
- int prot_id)
1198
+/**
1199
+ * scmi_create_protocol_devices - Create devices for all pending requests for
1200
+ * this SCMI instance.
1201
+ *
1202
+ * @np: The device node describing the protocol
1203
+ * @info: The SCMI instance descriptor
1204
+ * @prot_id: The protocol ID
1205
+ *
1206
+ * All devices previously requested for this instance (if any) are found and
1207
+ * created by scanning the proper @&scmi_requested_devices entry.
1208
+ */
1209
+static void scmi_create_protocol_devices(struct device_node *np,
1210
+ struct scmi_info *info, int prot_id)
6661211 {
667
- int loop, cnt;
1212
+ struct list_head *phead;
6681213
669
- for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
670
- if (devnames[loop].protocol_id != prot_id)
671
- continue;
1214
+ mutex_lock(&scmi_requested_devices_mtx);
1215
+ phead = idr_find(&scmi_requested_devices, prot_id);
1216
+ if (phead) {
1217
+ struct scmi_requested_dev *rdev;
6721218
673
- for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
674
- const char *name = devnames[loop].names[cnt];
1219
+ list_for_each_entry(rdev, phead, node)
1220
+ scmi_create_protocol_device(np, info, prot_id,
1221
+ rdev->id_table->name);
1222
+ }
1223
+ mutex_unlock(&scmi_requested_devices_mtx);
1224
+}
6751225
676
- if (name)
677
- scmi_create_protocol_device(np, info, prot_id,
678
- name);
1226
+/**
1227
+ * scmi_request_protocol_device - Helper to request a device
1228
+ *
1229
+ * @id_table: A protocol/name pair descriptor for the device to be created.
1230
+ *
1231
+ * This helper let an SCMI driver request specific devices identified by the
1232
+ * @id_table to be created for each active SCMI instance.
1233
+ *
1234
+ * The requested device name MUST NOT be already existent for any protocol;
1235
+ * at first the freshly requested @id_table is annotated in the IDR table
1236
+ * @scmi_requested_devices, then a matching device is created for each already
1237
+ * active SCMI instance. (if any)
1238
+ *
1239
+ * This way the requested device is created straight-away for all the already
1240
+ * initialized(probed) SCMI instances (handles) and it remains also annotated
1241
+ * as pending creation if the requesting SCMI driver was loaded before some
1242
+ * SCMI instance and related transports were available: when such late instance
1243
+ * is probed, its probe will take care to scan the list of pending requested
1244
+ * devices and create those on its own (see @scmi_create_protocol_devices and
1245
+ * its enclosing loop)
1246
+ *
1247
+ * Return: 0 on Success
1248
+ */
1249
+int scmi_request_protocol_device(const struct scmi_device_id *id_table)
1250
+{
1251
+ int ret = 0;
1252
+ unsigned int id = 0;
1253
+ struct list_head *head, *phead = NULL;
1254
+ struct scmi_requested_dev *rdev;
1255
+ struct scmi_info *info;
1256
+
1257
+ pr_debug("Requesting SCMI device (%s) for protocol %x\n",
1258
+ id_table->name, id_table->protocol_id);
1259
+
1260
+ /*
1261
+ * Search for the matching protocol rdev list and then search
1262
+ * of any existent equally named device...fails if any duplicate found.
1263
+ */
1264
+ mutex_lock(&scmi_requested_devices_mtx);
1265
+ idr_for_each_entry(&scmi_requested_devices, head, id) {
1266
+ if (!phead) {
1267
+ /* A list found registered in the IDR is never empty */
1268
+ rdev = list_first_entry(head, struct scmi_requested_dev,
1269
+ node);
1270
+ if (rdev->id_table->protocol_id ==
1271
+ id_table->protocol_id)
1272
+ phead = head;
1273
+ }
1274
+ list_for_each_entry(rdev, head, node) {
1275
+ if (!strcmp(rdev->id_table->name, id_table->name)) {
1276
+ pr_err("Ignoring duplicate request [%d] %s\n",
1277
+ rdev->id_table->protocol_id,
1278
+ rdev->id_table->name);
1279
+ ret = -EINVAL;
1280
+ goto out;
1281
+ }
6791282 }
6801283 }
1284
+
1285
+ /*
1286
+ * No duplicate found for requested id_table, so let's create a new
1287
+ * requested device entry for this new valid request.
1288
+ */
1289
+ rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
1290
+ if (!rdev) {
1291
+ ret = -ENOMEM;
1292
+ goto out;
1293
+ }
1294
+ rdev->id_table = id_table;
1295
+
1296
+ /*
1297
+ * Append the new requested device table descriptor to the head of the
1298
+ * related protocol list, eventually creating such head if not already
1299
+ * there.
1300
+ */
1301
+ if (!phead) {
1302
+ phead = kzalloc(sizeof(*phead), GFP_KERNEL);
1303
+ if (!phead) {
1304
+ kfree(rdev);
1305
+ ret = -ENOMEM;
1306
+ goto out;
1307
+ }
1308
+ INIT_LIST_HEAD(phead);
1309
+
1310
+ ret = idr_alloc(&scmi_requested_devices, (void *)phead,
1311
+ id_table->protocol_id,
1312
+ id_table->protocol_id + 1, GFP_KERNEL);
1313
+ if (ret != id_table->protocol_id) {
1314
+ pr_err("Failed to save SCMI device - ret:%d\n", ret);
1315
+ kfree(rdev);
1316
+ kfree(phead);
1317
+ ret = -EINVAL;
1318
+ goto out;
1319
+ }
1320
+ ret = 0;
1321
+ }
1322
+ list_add(&rdev->node, phead);
1323
+
1324
+ /*
1325
+ * Now effectively create and initialize the requested device for every
1326
+ * already initialized SCMI instance which has registered the requested
1327
+ * protocol as a valid active one: i.e. defined in DT and supported by
1328
+ * current platform FW.
1329
+ */
1330
+ mutex_lock(&scmi_list_mutex);
1331
+ list_for_each_entry(info, &scmi_list, node) {
1332
+ struct device_node *child;
1333
+
1334
+ child = idr_find(&info->active_protocols,
1335
+ id_table->protocol_id);
1336
+ if (child) {
1337
+ struct scmi_device *sdev;
1338
+
1339
+ sdev = scmi_get_protocol_device(child, info,
1340
+ id_table->protocol_id,
1341
+ id_table->name);
1342
+ /* Set handle if not already set: device existed */
1343
+ if (sdev && !sdev->handle)
1344
+ sdev->handle = scmi_handle_get_from_info(info);
1345
+ } else {
1346
+ dev_err(info->dev,
1347
+ "Failed. SCMI protocol %d not active.\n",
1348
+ id_table->protocol_id);
1349
+ }
1350
+ }
1351
+ mutex_unlock(&scmi_list_mutex);
1352
+
1353
+out:
1354
+ mutex_unlock(&scmi_requested_devices_mtx);
1355
+
1356
+ return ret;
1357
+}
1358
+
1359
+/**
1360
+ * scmi_unrequest_protocol_device - Helper to unrequest a device
1361
+ *
1362
+ * @id_table: A protocol/name pair descriptor for the device to be unrequested.
1363
+ *
1364
+ * An helper to let an SCMI driver release its request about devices; note that
1365
+ * devices are created and initialized once the first SCMI driver request them
1366
+ * but they destroyed only on SCMI core unloading/unbinding.
1367
+ *
1368
+ * The current SCMI transport layer uses such devices as internal references and
1369
+ * as such they could be shared as same transport between multiple drivers so
1370
+ * that cannot be safely destroyed till the whole SCMI stack is removed.
1371
+ * (unless adding further burden of refcounting.)
1372
+ */
1373
+void scmi_unrequest_protocol_device(const struct scmi_device_id *id_table)
1374
+{
1375
+ struct list_head *phead;
1376
+
1377
+ pr_debug("Unrequesting SCMI device (%s) for protocol %x\n",
1378
+ id_table->name, id_table->protocol_id);
1379
+
1380
+ mutex_lock(&scmi_requested_devices_mtx);
1381
+ phead = idr_find(&scmi_requested_devices, id_table->protocol_id);
1382
+ if (phead) {
1383
+ struct scmi_requested_dev *victim, *tmp;
1384
+
1385
+ list_for_each_entry_safe(victim, tmp, phead, node) {
1386
+ if (!strcmp(victim->id_table->name, id_table->name)) {
1387
+ list_del(&victim->node);
1388
+ kfree(victim);
1389
+ break;
1390
+ }
1391
+ }
1392
+
1393
+ if (list_empty(phead)) {
1394
+ idr_remove(&scmi_requested_devices,
1395
+ id_table->protocol_id);
1396
+ kfree(phead);
1397
+ }
1398
+ }
1399
+ mutex_unlock(&scmi_requested_devices_mtx);
1400
+}
1401
+
1402
+static int scmi_cleanup_txrx_channels(struct scmi_info *info)
1403
+{
1404
+ int ret;
1405
+ struct idr *idr = &info->tx_idr;
1406
+
1407
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1408
+ idr_destroy(&info->tx_idr);
1409
+
1410
+ idr = &info->rx_idr;
1411
+ ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
1412
+ idr_destroy(&info->rx_idr);
1413
+
1414
+ return ret;
6811415 }
6821416
6831417 static int scmi_probe(struct platform_device *pdev)
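Usage sketch (not part of the patch): scmi_request_protocol_device() is meant to be fed one scmi_device_id entry at a time, typically from the SCMI bus code when an scmi_driver registers; the protocol_id/name pair below is only an example of such an entry.

/* Illustrative only -- not part of this patch. */
static const struct scmi_device_id example_clk_id = {
	.protocol_id = SCMI_PROTOCOL_CLOCK,
	.name = "clocks",
};

static int example_request_clock_device(void)
{
	/*
	 * Creates a "clocks" scmi_device for every already-probed SCMI
	 * instance exposing the clock protocol, and records the request
	 * so instances probed later create it too.
	 */
	return scmi_request_protocol_device(&example_clk_id);
}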
@@ -700,10 +1434,9 @@
 	info->dev = dev;
 	info->desc = desc;
 	INIT_LIST_HEAD(&info->node);
-
-	ret = scmi_xfer_info_init(info);
-	if (ret)
-		return ret;
+	idr_init(&info->protocols);
+	mutex_init(&info->protocols_mtx);
+	idr_init(&info->active_protocols);
 
 	platform_set_drvdata(pdev, info);
 	idr_init(&info->tx_idr);
@@ -712,15 +1445,30 @@
 	handle = &info->handle;
 	handle->dev = info->dev;
 	handle->version = &info->version;
+	handle->devm_acquire_protocol = scmi_devm_acquire_protocol;
+	handle->devm_get_protocol = scmi_devm_get_protocol;
+	handle->devm_put_protocol = scmi_devm_put_protocol;
 
 	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
 	if (ret)
 		return ret;
 
-	ret = scmi_base_protocol_init(handle);
+	ret = scmi_xfer_info_init(info);
+	if (ret)
+		goto clear_txrx_setup;
+
+	if (scmi_notification_init(handle))
+		dev_err(dev, "SCMI Notifications NOT available.\n");
+
+	/*
+	 * Trigger SCMI Base protocol initialization.
+	 * It's mandatory and won't be ever released/deinit until the
+	 * SCMI stack is shutdown/unloaded as a whole.
+	 */
+	ret = scmi_acquire_protocol(handle, SCMI_PROTOCOL_BASE);
 	if (ret) {
-		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
-		return ret;
+		dev_err(dev, "unable to communicate with SCMI\n");
+		goto notification_exit;
 	}
 
 	mutex_lock(&scmi_list_mutex);
@@ -742,10 +1490,29 @@
 			continue;
 		}
 
+		/*
+		 * Save this valid DT protocol descriptor amongst
+		 * @active_protocols for this SCMI instance/
+		 */
+		ret = idr_alloc(&info->active_protocols, child,
+				prot_id, prot_id + 1, GFP_KERNEL);
+		if (ret != prot_id) {
+			dev_err(dev, "SCMI protocol %d already activated. Skip\n",
+				prot_id);
+			continue;
+		}
+
+		of_node_get(child);
 		scmi_create_protocol_devices(child, info, prot_id);
 	}
 
 	return 0;
+
+notification_exit:
+	scmi_notification_exit(&info->handle);
+clear_txrx_setup:
+	scmi_cleanup_txrx_channels(info);
+	return ret;
 }
 
 void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
@@ -755,9 +1522,9 @@
 
 static int scmi_remove(struct platform_device *pdev)
 {
-	int ret = 0;
+	int ret = 0, id;
 	struct scmi_info *info = platform_get_drvdata(pdev);
-	struct idr *idr = &info->tx_idr;
+	struct device_node *child;
 
 	mutex_lock(&scmi_list_mutex);
 	if (info->users)
@@ -769,21 +1536,72 @@
7691536 if (ret)
7701537 return ret;
7711538
1539
+ scmi_notification_exit(&info->handle);
1540
+
1541
+ mutex_lock(&info->protocols_mtx);
1542
+ idr_destroy(&info->protocols);
1543
+ mutex_unlock(&info->protocols_mtx);
1544
+
1545
+ idr_for_each_entry(&info->active_protocols, child, id)
1546
+ of_node_put(child);
1547
+ idr_destroy(&info->active_protocols);
1548
+
7721549 /* Safe to free channels since no more users */
773
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
774
- idr_destroy(&info->tx_idr);
775
-
776
- idr = &info->rx_idr;
777
- ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
778
- idr_destroy(&info->rx_idr);
779
-
780
- return ret;
1550
+ return scmi_cleanup_txrx_channels(info);
7811551 }
1552
+
1553
+static ssize_t protocol_version_show(struct device *dev,
1554
+ struct device_attribute *attr, char *buf)
1555
+{
1556
+ struct scmi_info *info = dev_get_drvdata(dev);
1557
+
1558
+ return sprintf(buf, "%u.%u\n", info->version.major_ver,
1559
+ info->version.minor_ver);
1560
+}
1561
+static DEVICE_ATTR_RO(protocol_version);
1562
+
1563
+static ssize_t firmware_version_show(struct device *dev,
1564
+ struct device_attribute *attr, char *buf)
1565
+{
1566
+ struct scmi_info *info = dev_get_drvdata(dev);
1567
+
1568
+ return sprintf(buf, "0x%x\n", info->version.impl_ver);
1569
+}
1570
+static DEVICE_ATTR_RO(firmware_version);
1571
+
1572
+static ssize_t vendor_id_show(struct device *dev,
1573
+ struct device_attribute *attr, char *buf)
1574
+{
1575
+ struct scmi_info *info = dev_get_drvdata(dev);
1576
+
1577
+ return sprintf(buf, "%s\n", info->version.vendor_id);
1578
+}
1579
+static DEVICE_ATTR_RO(vendor_id);
1580
+
1581
+static ssize_t sub_vendor_id_show(struct device *dev,
1582
+ struct device_attribute *attr, char *buf)
1583
+{
1584
+ struct scmi_info *info = dev_get_drvdata(dev);
1585
+
1586
+ return sprintf(buf, "%s\n", info->version.sub_vendor_id);
1587
+}
1588
+static DEVICE_ATTR_RO(sub_vendor_id);
1589
+
1590
+static struct attribute *versions_attrs[] = {
1591
+ &dev_attr_firmware_version.attr,
1592
+ &dev_attr_protocol_version.attr,
1593
+ &dev_attr_vendor_id.attr,
1594
+ &dev_attr_sub_vendor_id.attr,
1595
+ NULL,
1596
+};
1597
+ATTRIBUTE_GROUPS(versions);
7821598
7831599 /* Each compatible listed below must have descriptor associated with it */
7841600 static const struct of_device_id scmi_of_match[] = {
1601
+#ifdef CONFIG_MAILBOX
7851602 { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
786
-#ifdef CONFIG_HAVE_ARM_SMCCC
1603
+#endif
1604
+#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
7871605 { .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
7881606 #endif
7891607 { /* Sentinel */ },
@@ -794,7 +1612,9 @@
 static struct platform_driver scmi_driver = {
 	.driver = {
 		   .name = "arm-scmi",
+		   .suppress_bind_attrs = true,
 		   .of_match_table = scmi_of_match,
+		   .dev_groups = versions_groups,
 		   },
 	.probe = scmi_probe,
 	.remove = scmi_remove,
@@ -804,11 +1624,15 @@
 {
 	scmi_bus_init();
 
+	scmi_base_register();
+
 	scmi_clock_register();
 	scmi_perf_register();
 	scmi_power_register();
 	scmi_reset_register();
 	scmi_sensors_register();
+	scmi_voltage_register();
+	scmi_system_register();
 
 	return platform_driver_register(&scmi_driver);
 }
@@ -816,19 +1640,23 @@
 
 static void __exit scmi_driver_exit(void)
 {
-	scmi_bus_exit();
+	scmi_base_unregister();
 
 	scmi_clock_unregister();
 	scmi_perf_unregister();
 	scmi_power_unregister();
 	scmi_reset_unregister();
 	scmi_sensors_unregister();
+	scmi_voltage_unregister();
+	scmi_system_unregister();
+
+	scmi_bus_exit();
 
 	platform_driver_unregister(&scmi_driver);
 }
 module_exit(scmi_driver_exit);
 
-MODULE_ALIAS("platform: arm-scmi");
+MODULE_ALIAS("platform:arm-scmi");
 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
 MODULE_DESCRIPTION("ARM SCMI protocol driver");
 MODULE_LICENSE("GPL v2");