hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/firmware/ti_sci.c
....@@ -2,7 +2,7 @@
22 /*
33 * Texas Instruments System Control Interface Protocol Driver
44 *
5
- * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
5
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/
66 * Nishanth Menon
77 */
88
....@@ -66,14 +66,14 @@
6666
6767 /**
6868 * struct ti_sci_desc - Description of SoC integration
69
- * @host_id: Host identifier representing the compute entity
69
+ * @default_host_id: Host identifier representing the compute entity
7070 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
7171 * @max_msgs: Maximum number of messages that can be pending
7272 * simultaneously in the system
7373 * @max_msg_size: Maximum size of data per message that can be handled.
7474 */
7575 struct ti_sci_desc {
76
- u8 host_id;
76
+ u8 default_host_id;
7777 int max_rx_timeout_ms;
7878 int max_msgs;
7979 int max_msg_size;
....@@ -94,6 +94,7 @@
9494 * @chan_rx: Receive mailbox channel
9595 * @minfo: Message info
9696 * @node: list head
97
+ * @host_id: Host ID
9798 * @users: Number of users of this instance
9899 */
99100 struct ti_sci_info {
....@@ -110,6 +111,7 @@
110111 struct mbox_chan *chan_rx;
111112 struct ti_sci_xfers_info minfo;
112113 struct list_head node;
114
+ u8 host_id;
113115 /* protected by ti_sci_list_mutex */
114116 int users;
115117
....@@ -144,25 +146,8 @@
144146 return 0;
145147 }
146148
147
-/**
148
- * ti_sci_debug_open() - debug file open
149
- * @inode: inode pointer
150
- * @file: file pointer
151
- *
152
- * Return: result of single_open
153
- */
154
-static int ti_sci_debug_open(struct inode *inode, struct file *file)
155
-{
156
- return single_open(file, ti_sci_debug_show, inode->i_private);
157
-}
158
-
159
-/* log file operations */
160
-static const struct file_operations ti_sci_debug_fops = {
161
- .open = ti_sci_debug_open,
162
- .read = seq_read,
163
- .llseek = seq_lseek,
164
- .release = single_release,
165
-};
149
+/* Provide the log file operations interface*/
150
+DEFINE_SHOW_ATTRIBUTE(ti_sci_debug);
166151
167152 /**
168153 * ti_sci_debugfs_create() - Create log debug file
....@@ -370,7 +355,7 @@
370355
371356 hdr->seq = xfer_id;
372357 hdr->type = msg_type;
373
- hdr->host = info->desc->host_id;
358
+ hdr->host = info->host_id;
374359 hdr->flags = msg_flags;
375360
376361 return xfer;
....@@ -632,6 +617,7 @@
632617
633618 /**
634619 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
620
+ * that can be shared with other hosts.
635621 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
636622 * @id: Device Identifier
637623 *
....@@ -639,11 +625,29 @@
639625 * usage count by balancing get_device with put_device. No refcounting is
640626 * managed by driver for that purpose.
641627 *
642
- * NOTE: The request is for exclusive access for the processor.
643
- *
644628 * Return: 0 if all went fine, else return appropriate error.
645629 */
646630 static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
631
+{
632
+ return ti_sci_set_device_state(handle, id, 0,
633
+ MSG_DEVICE_SW_STATE_ON);
634
+}
635
+
636
+/**
637
+ * ti_sci_cmd_get_device_exclusive() - command to request for device managed by
638
+ * TISCI that is exclusively owned by the
639
+ * requesting host.
640
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
641
+ * @id: Device Identifier
642
+ *
643
+ * Request for the device - NOTE: the client MUST maintain integrity of
644
+ * usage count by balancing get_device with put_device. No refcounting is
645
+ * managed by driver for that purpose.
646
+ *
647
+ * Return: 0 if all went fine, else return appropriate error.
648
+ */
649
+static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
650
+ u32 id)
647651 {
648652 return ti_sci_set_device_state(handle, id,
649653 MSG_FLAG_DEVICE_EXCLUSIVE,
....@@ -662,6 +666,26 @@
662666 * Return: 0 if all went fine, else return appropriate error.
663667 */
664668 static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
669
+{
670
+ return ti_sci_set_device_state(handle, id, 0,
671
+ MSG_DEVICE_SW_STATE_RETENTION);
672
+}
673
+
674
+/**
675
+ * ti_sci_cmd_idle_device_exclusive() - Command to idle a device managed by
676
+ * TISCI that is exclusively owned by
677
+ * requesting host.
678
+ * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
679
+ * @id: Device Identifier
680
+ *
681
+ * Request for the device - NOTE: the client MUST maintain integrity of
682
+ * usage count by balancing get_device with put_device. No refcounting is
683
+ * managed by driver for that purpose.
684
+ *
685
+ * Return: 0 if all went fine, else return appropriate error.
686
+ */
687
+static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
688
+ u32 id)
665689 {
666690 return ti_sci_set_device_state(handle, id,
667691 MSG_FLAG_DEVICE_EXCLUSIVE,
....@@ -913,7 +937,7 @@
913937 * Return: 0 if all went well, else returns appropriate error value.
914938 */
915939 static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
916
- u32 dev_id, u8 clk_id,
940
+ u32 dev_id, u32 clk_id,
917941 u32 flags, u8 state)
918942 {
919943 struct ti_sci_info *info;
....@@ -941,7 +965,12 @@
941965 }
942966 req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf;
943967 req->dev_id = dev_id;
944
- req->clk_id = clk_id;
968
+ if (clk_id < 255) {
969
+ req->clk_id = clk_id;
970
+ } else {
971
+ req->clk_id = 255;
972
+ req->clk_id_32 = clk_id;
973
+ }
945974 req->request_state = state;
946975
947976 ret = ti_sci_do_xfer(info, xfer);
....@@ -973,7 +1002,7 @@
9731002 * Return: 0 if all went well, else returns appropriate error value.
9741003 */
9751004 static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
976
- u32 dev_id, u8 clk_id,
1005
+ u32 dev_id, u32 clk_id,
9771006 u8 *programmed_state, u8 *current_state)
9781007 {
9791008 struct ti_sci_info *info;
....@@ -1004,7 +1033,12 @@
10041033 }
10051034 req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf;
10061035 req->dev_id = dev_id;
1007
- req->clk_id = clk_id;
1036
+ if (clk_id < 255) {
1037
+ req->clk_id = clk_id;
1038
+ } else {
1039
+ req->clk_id = 255;
1040
+ req->clk_id_32 = clk_id;
1041
+ }
10081042
10091043 ret = ti_sci_do_xfer(info, xfer);
10101044 if (ret) {
....@@ -1044,8 +1078,8 @@
10441078 * Return: 0 if all went well, else returns appropriate error value.
10451079 */
10461080 static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1047
- u8 clk_id, bool needs_ssc, bool can_change_freq,
1048
- bool enable_input_term)
1081
+ u32 clk_id, bool needs_ssc,
1082
+ bool can_change_freq, bool enable_input_term)
10491083 {
10501084 u32 flags = 0;
10511085
....@@ -1070,9 +1104,10 @@
10701104 * Return: 0 if all went well, else returns appropriate error value.
10711105 */
10721106 static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1073
- u32 dev_id, u8 clk_id)
1107
+ u32 dev_id, u32 clk_id)
10741108 {
1075
- return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1109
+ return ti_sci_set_clock_state(handle, dev_id, clk_id,
1110
+ MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
10761111 MSG_CLOCK_SW_STATE_UNREQ);
10771112 }
10781113
....@@ -1089,9 +1124,10 @@
10891124 * Return: 0 if all went well, else returns appropriate error value.
10901125 */
10911126 static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1092
- u32 dev_id, u8 clk_id)
1127
+ u32 dev_id, u32 clk_id)
10931128 {
1094
- return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1129
+ return ti_sci_set_clock_state(handle, dev_id, clk_id,
1130
+ MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE,
10951131 MSG_CLOCK_SW_STATE_AUTO);
10961132 }
10971133
....@@ -1107,7 +1143,7 @@
11071143 * Return: 0 if all went well, else returns appropriate error value.
11081144 */
11091145 static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1110
- u32 dev_id, u8 clk_id, bool *req_state)
1146
+ u32 dev_id, u32 clk_id, bool *req_state)
11111147 {
11121148 u8 state = 0;
11131149 int ret;
....@@ -1136,7 +1172,7 @@
11361172 * Return: 0 if all went well, else returns appropriate error value.
11371173 */
11381174 static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1139
- u8 clk_id, bool *req_state, bool *curr_state)
1175
+ u32 clk_id, bool *req_state, bool *curr_state)
11401176 {
11411177 u8 c_state = 0, r_state = 0;
11421178 int ret;
....@@ -1169,7 +1205,7 @@
11691205 * Return: 0 if all went well, else returns appropriate error value.
11701206 */
11711207 static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1172
- u8 clk_id, bool *req_state, bool *curr_state)
1208
+ u32 clk_id, bool *req_state, bool *curr_state)
11731209 {
11741210 u8 c_state = 0, r_state = 0;
11751211 int ret;
....@@ -1201,7 +1237,7 @@
12011237 * Return: 0 if all went well, else returns appropriate error value.
12021238 */
12031239 static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1204
- u32 dev_id, u8 clk_id, u8 parent_id)
1240
+ u32 dev_id, u32 clk_id, u32 parent_id)
12051241 {
12061242 struct ti_sci_info *info;
12071243 struct ti_sci_msg_req_set_clock_parent *req;
....@@ -1228,8 +1264,18 @@
12281264 }
12291265 req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf;
12301266 req->dev_id = dev_id;
1231
- req->clk_id = clk_id;
1232
- req->parent_id = parent_id;
1267
+ if (clk_id < 255) {
1268
+ req->clk_id = clk_id;
1269
+ } else {
1270
+ req->clk_id = 255;
1271
+ req->clk_id_32 = clk_id;
1272
+ }
1273
+ if (parent_id < 255) {
1274
+ req->parent_id = parent_id;
1275
+ } else {
1276
+ req->parent_id = 255;
1277
+ req->parent_id_32 = parent_id;
1278
+ }
12331279
12341280 ret = ti_sci_do_xfer(info, xfer);
12351281 if (ret) {
....@@ -1259,7 +1305,7 @@
12591305 * Return: 0 if all went well, else returns appropriate error value.
12601306 */
12611307 static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1262
- u32 dev_id, u8 clk_id, u8 *parent_id)
1308
+ u32 dev_id, u32 clk_id, u32 *parent_id)
12631309 {
12641310 struct ti_sci_info *info;
12651311 struct ti_sci_msg_req_get_clock_parent *req;
....@@ -1286,7 +1332,12 @@
12861332 }
12871333 req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf;
12881334 req->dev_id = dev_id;
1289
- req->clk_id = clk_id;
1335
+ if (clk_id < 255) {
1336
+ req->clk_id = clk_id;
1337
+ } else {
1338
+ req->clk_id = 255;
1339
+ req->clk_id_32 = clk_id;
1340
+ }
12901341
12911342 ret = ti_sci_do_xfer(info, xfer);
12921343 if (ret) {
....@@ -1296,10 +1347,14 @@
12961347
12971348 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf;
12981349
1299
- if (!ti_sci_is_response_ack(resp))
1350
+ if (!ti_sci_is_response_ack(resp)) {
13001351 ret = -ENODEV;
1301
- else
1302
- *parent_id = resp->parent_id;
1352
+ } else {
1353
+ if (resp->parent_id < 255)
1354
+ *parent_id = resp->parent_id;
1355
+ else
1356
+ *parent_id = resp->parent_id_32;
1357
+ }
13031358
13041359 fail:
13051360 ti_sci_put_one_xfer(&info->minfo, xfer);
....@@ -1319,8 +1374,8 @@
13191374 * Return: 0 if all went well, else returns appropriate error value.
13201375 */
13211376 static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1322
- u32 dev_id, u8 clk_id,
1323
- u8 *num_parents)
1377
+ u32 dev_id, u32 clk_id,
1378
+ u32 *num_parents)
13241379 {
13251380 struct ti_sci_info *info;
13261381 struct ti_sci_msg_req_get_clock_num_parents *req;
....@@ -1347,7 +1402,12 @@
13471402 }
13481403 req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf;
13491404 req->dev_id = dev_id;
1350
- req->clk_id = clk_id;
1405
+ if (clk_id < 255) {
1406
+ req->clk_id = clk_id;
1407
+ } else {
1408
+ req->clk_id = 255;
1409
+ req->clk_id_32 = clk_id;
1410
+ }
13511411
13521412 ret = ti_sci_do_xfer(info, xfer);
13531413 if (ret) {
....@@ -1357,10 +1417,14 @@
13571417
13581418 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf;
13591419
1360
- if (!ti_sci_is_response_ack(resp))
1420
+ if (!ti_sci_is_response_ack(resp)) {
13611421 ret = -ENODEV;
1362
- else
1363
- *num_parents = resp->num_parents;
1422
+ } else {
1423
+ if (resp->num_parents < 255)
1424
+ *num_parents = resp->num_parents;
1425
+ else
1426
+ *num_parents = resp->num_parents_32;
1427
+ }
13641428
13651429 fail:
13661430 ti_sci_put_one_xfer(&info->minfo, xfer);
....@@ -1388,7 +1452,7 @@
13881452 * Return: 0 if all went well, else returns appropriate error value.
13891453 */
13901454 static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1391
- u32 dev_id, u8 clk_id, u64 min_freq,
1455
+ u32 dev_id, u32 clk_id, u64 min_freq,
13921456 u64 target_freq, u64 max_freq,
13931457 u64 *match_freq)
13941458 {
....@@ -1417,7 +1481,12 @@
14171481 }
14181482 req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf;
14191483 req->dev_id = dev_id;
1420
- req->clk_id = clk_id;
1484
+ if (clk_id < 255) {
1485
+ req->clk_id = clk_id;
1486
+ } else {
1487
+ req->clk_id = 255;
1488
+ req->clk_id_32 = clk_id;
1489
+ }
14211490 req->min_freq_hz = min_freq;
14221491 req->target_freq_hz = target_freq;
14231492 req->max_freq_hz = max_freq;
....@@ -1460,7 +1529,7 @@
14601529 * Return: 0 if all went well, else returns appropriate error value.
14611530 */
14621531 static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1463
- u32 dev_id, u8 clk_id, u64 min_freq,
1532
+ u32 dev_id, u32 clk_id, u64 min_freq,
14641533 u64 target_freq, u64 max_freq)
14651534 {
14661535 struct ti_sci_info *info;
....@@ -1488,7 +1557,12 @@
14881557 }
14891558 req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf;
14901559 req->dev_id = dev_id;
1491
- req->clk_id = clk_id;
1560
+ if (clk_id < 255) {
1561
+ req->clk_id = clk_id;
1562
+ } else {
1563
+ req->clk_id = 255;
1564
+ req->clk_id_32 = clk_id;
1565
+ }
14921566 req->min_freq_hz = min_freq;
14931567 req->target_freq_hz = target_freq;
14941568 req->max_freq_hz = max_freq;
....@@ -1521,7 +1595,7 @@
15211595 * Return: 0 if all went well, else returns appropriate error value.
15221596 */
15231597 static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1524
- u32 dev_id, u8 clk_id, u64 *freq)
1598
+ u32 dev_id, u32 clk_id, u64 *freq)
15251599 {
15261600 struct ti_sci_info *info;
15271601 struct ti_sci_msg_req_get_clock_freq *req;
....@@ -1548,7 +1622,12 @@
15481622 }
15491623 req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf;
15501624 req->dev_id = dev_id;
1551
- req->clk_id = clk_id;
1625
+ if (clk_id < 255) {
1626
+ req->clk_id = clk_id;
1627
+ } else {
1628
+ req->clk_id = 255;
1629
+ req->clk_id_32 = clk_id;
1630
+ }
15521631
15531632 ret = ti_sci_do_xfer(info, xfer);
15541633 if (ret) {
....@@ -1615,6 +1694,1175 @@
16151694 return ret;
16161695 }
16171696
1697
+/**
1698
+ * ti_sci_get_resource_range - Helper to get a range of resources assigned
1699
+ * to a host. Resource is uniquely identified by
1700
+ * type and subtype.
1701
+ * @handle: Pointer to TISCI handle.
1702
+ * @dev_id: TISCI device ID.
1703
+ * @subtype: Resource assignment subtype that is being requested
1704
+ * from the given device.
1705
+ * @s_host: Host processor ID to which the resources are allocated
1706
+ * @range_start: Start index of the resource range
1707
+ * @range_num: Number of resources in the range
1708
+ *
1709
+ * Return: 0 if all went fine, else return appropriate error.
1710
+ */
1711
+static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1712
+ u32 dev_id, u8 subtype, u8 s_host,
1713
+ u16 *range_start, u16 *range_num)
1714
+{
1715
+ struct ti_sci_msg_resp_get_resource_range *resp;
1716
+ struct ti_sci_msg_req_get_resource_range *req;
1717
+ struct ti_sci_xfer *xfer;
1718
+ struct ti_sci_info *info;
1719
+ struct device *dev;
1720
+ int ret = 0;
1721
+
1722
+ if (IS_ERR(handle))
1723
+ return PTR_ERR(handle);
1724
+ if (!handle)
1725
+ return -EINVAL;
1726
+
1727
+ info = handle_to_ti_sci_info(handle);
1728
+ dev = info->dev;
1729
+
1730
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1731
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1732
+ sizeof(*req), sizeof(*resp));
1733
+ if (IS_ERR(xfer)) {
1734
+ ret = PTR_ERR(xfer);
1735
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
1736
+ return ret;
1737
+ }
1738
+
1739
+ req = (struct ti_sci_msg_req_get_resource_range *)xfer->xfer_buf;
1740
+ req->secondary_host = s_host;
1741
+ req->type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
1742
+ req->subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1743
+
1744
+ ret = ti_sci_do_xfer(info, xfer);
1745
+ if (ret) {
1746
+ dev_err(dev, "Mbox send fail %d\n", ret);
1747
+ goto fail;
1748
+ }
1749
+
1750
+ resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->xfer_buf;
1751
+
1752
+ if (!ti_sci_is_response_ack(resp)) {
1753
+ ret = -ENODEV;
1754
+ } else if (!resp->range_start && !resp->range_num) {
1755
+ ret = -ENODEV;
1756
+ } else {
1757
+ *range_start = resp->range_start;
1758
+ *range_num = resp->range_num;
1759
+ };
1760
+
1761
+fail:
1762
+ ti_sci_put_one_xfer(&info->minfo, xfer);
1763
+
1764
+ return ret;
1765
+}
1766
+
1767
+/**
1768
+ * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1769
+ * that is same as ti sci interface host.
1770
+ * @handle: Pointer to TISCI handle.
1771
+ * @dev_id: TISCI device ID.
1772
+ * @subtype: Resource assignment subtype that is being requested
1773
+ * from the given device.
1774
+ * @range_start: Start index of the resource range
1775
+ * @range_num: Number of resources in the range
1776
+ *
1777
+ * Return: 0 if all went fine, else return appropriate error.
1778
+ */
1779
+static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1780
+ u32 dev_id, u8 subtype,
1781
+ u16 *range_start, u16 *range_num)
1782
+{
1783
+ return ti_sci_get_resource_range(handle, dev_id, subtype,
1784
+ TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1785
+ range_start, range_num);
1786
+}
1787
+
1788
+/**
1789
+ * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1790
+ * assigned to a specified host.
1791
+ * @handle: Pointer to TISCI handle.
1792
+ * @dev_id: TISCI device ID.
1793
+ * @subtype: Resource assignment subtype that is being requested
1794
+ * from the given device.
1795
+ * @s_host: Host processor ID to which the resources are allocated
1796
+ * @range_start: Start index of the resource range
1797
+ * @range_num: Number of resources in the range
1798
+ *
1799
+ * Return: 0 if all went fine, else return appropriate error.
1800
+ */
1801
+static
1802
+int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1803
+ u32 dev_id, u8 subtype, u8 s_host,
1804
+ u16 *range_start, u16 *range_num)
1805
+{
1806
+ return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1807
+ range_start, range_num);
1808
+}
1809
+
1810
+/**
1811
+ * ti_sci_manage_irq() - Helper api to configure/release the irq route between
1812
+ * the requested source and destination
1813
+ * @handle: Pointer to TISCI handle.
1814
+ * @valid_params: Bit fields defining the validity of certain params
1815
+ * @src_id: Device ID of the IRQ source
1816
+ * @src_index: IRQ source index within the source device
1817
+ * @dst_id: Device ID of the IRQ destination
1818
+ * @dst_host_irq: IRQ number of the destination device
1819
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
1820
+ * @vint: Virtual interrupt to be used within the IA
1821
+ * @global_event: Global event number to be used for the requesting event
1822
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
1823
+ * @s_host: Secondary host ID to which the irq/event is being
1824
+ * requested for.
1825
+ * @type: Request type irq set or release.
1826
+ *
1827
+ * Return: 0 if all went fine, else return appropriate error.
1828
+ */
1829
+static int ti_sci_manage_irq(const struct ti_sci_handle *handle,
1830
+ u32 valid_params, u16 src_id, u16 src_index,
1831
+ u16 dst_id, u16 dst_host_irq, u16 ia_id, u16 vint,
1832
+ u16 global_event, u8 vint_status_bit, u8 s_host,
1833
+ u16 type)
1834
+{
1835
+ struct ti_sci_msg_req_manage_irq *req;
1836
+ struct ti_sci_msg_hdr *resp;
1837
+ struct ti_sci_xfer *xfer;
1838
+ struct ti_sci_info *info;
1839
+ struct device *dev;
1840
+ int ret = 0;
1841
+
1842
+ if (IS_ERR(handle))
1843
+ return PTR_ERR(handle);
1844
+ if (!handle)
1845
+ return -EINVAL;
1846
+
1847
+ info = handle_to_ti_sci_info(handle);
1848
+ dev = info->dev;
1849
+
1850
+ xfer = ti_sci_get_one_xfer(info, type, TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1851
+ sizeof(*req), sizeof(*resp));
1852
+ if (IS_ERR(xfer)) {
1853
+ ret = PTR_ERR(xfer);
1854
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
1855
+ return ret;
1856
+ }
1857
+ req = (struct ti_sci_msg_req_manage_irq *)xfer->xfer_buf;
1858
+ req->valid_params = valid_params;
1859
+ req->src_id = src_id;
1860
+ req->src_index = src_index;
1861
+ req->dst_id = dst_id;
1862
+ req->dst_host_irq = dst_host_irq;
1863
+ req->ia_id = ia_id;
1864
+ req->vint = vint;
1865
+ req->global_event = global_event;
1866
+ req->vint_status_bit = vint_status_bit;
1867
+ req->secondary_host = s_host;
1868
+
1869
+ ret = ti_sci_do_xfer(info, xfer);
1870
+ if (ret) {
1871
+ dev_err(dev, "Mbox send fail %d\n", ret);
1872
+ goto fail;
1873
+ }
1874
+
1875
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
1876
+
1877
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
1878
+
1879
+fail:
1880
+ ti_sci_put_one_xfer(&info->minfo, xfer);
1881
+
1882
+ return ret;
1883
+}
1884
+
1885
+/**
1886
+ * ti_sci_set_irq() - Helper api to configure the irq route between the
1887
+ * requested source and destination
1888
+ * @handle: Pointer to TISCI handle.
1889
+ * @valid_params: Bit fields defining the validity of certain params
1890
+ * @src_id: Device ID of the IRQ source
1891
+ * @src_index: IRQ source index within the source device
1892
+ * @dst_id: Device ID of the IRQ destination
1893
+ * @dst_host_irq: IRQ number of the destination device
1894
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
1895
+ * @vint: Virtual interrupt to be used within the IA
1896
+ * @global_event: Global event number to be used for the requesting event
1897
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
1898
+ * @s_host: Secondary host ID to which the irq/event is being
1899
+ * requested for.
1900
+ *
1901
+ * Return: 0 if all went fine, else return appropriate error.
1902
+ */
1903
+static int ti_sci_set_irq(const struct ti_sci_handle *handle, u32 valid_params,
1904
+ u16 src_id, u16 src_index, u16 dst_id,
1905
+ u16 dst_host_irq, u16 ia_id, u16 vint,
1906
+ u16 global_event, u8 vint_status_bit, u8 s_host)
1907
+{
1908
+ pr_debug("%s: IRQ set with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
1909
+ __func__, valid_params, src_id, src_index,
1910
+ dst_id, dst_host_irq, ia_id, vint, global_event,
1911
+ vint_status_bit);
1912
+
1913
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1914
+ dst_id, dst_host_irq, ia_id, vint,
1915
+ global_event, vint_status_bit, s_host,
1916
+ TI_SCI_MSG_SET_IRQ);
1917
+}
1918
+
1919
+/**
1920
+ * ti_sci_free_irq() - Helper api to free the irq route between the
1921
+ * requested source and destination
1922
+ * @handle: Pointer to TISCI handle.
1923
+ * @valid_params: Bit fields defining the validity of certain params
1924
+ * @src_id: Device ID of the IRQ source
1925
+ * @src_index: IRQ source index within the source device
1926
+ * @dst_id: Device ID of the IRQ destination
1927
+ * @dst_host_irq: IRQ number of the destination device
1928
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
1929
+ * @vint: Virtual interrupt to be used within the IA
1930
+ * @global_event: Global event number to be used for the requesting event
1931
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
1932
+ * @s_host: Secondary host ID to which the irq/event is being
1933
+ * requested for.
1934
+ *
1935
+ * Return: 0 if all went fine, else return appropriate error.
1936
+ */
1937
+static int ti_sci_free_irq(const struct ti_sci_handle *handle, u32 valid_params,
1938
+ u16 src_id, u16 src_index, u16 dst_id,
1939
+ u16 dst_host_irq, u16 ia_id, u16 vint,
1940
+ u16 global_event, u8 vint_status_bit, u8 s_host)
1941
+{
1942
+ pr_debug("%s: IRQ release with valid_params = 0x%x from src = %d, index = %d, to dst = %d, irq = %d,via ia_id = %d, vint = %d, global event = %d,status_bit = %d\n",
1943
+ __func__, valid_params, src_id, src_index,
1944
+ dst_id, dst_host_irq, ia_id, vint, global_event,
1945
+ vint_status_bit);
1946
+
1947
+ return ti_sci_manage_irq(handle, valid_params, src_id, src_index,
1948
+ dst_id, dst_host_irq, ia_id, vint,
1949
+ global_event, vint_status_bit, s_host,
1950
+ TI_SCI_MSG_FREE_IRQ);
1951
+}
1952
+
1953
+/**
1954
+ * ti_sci_cmd_set_irq() - Configure a host irq route between the requested
1955
+ * source and destination.
1956
+ * @handle: Pointer to TISCI handle.
1957
+ * @src_id: Device ID of the IRQ source
1958
+ * @src_index: IRQ source index within the source device
1959
+ * @dst_id: Device ID of the IRQ destination
1960
+ * @dst_host_irq: IRQ number of the destination device
1961
+ * @vint_irq: Boolean specifying if this interrupt belongs to
1962
+ * Interrupt Aggregator.
1963
+ *
1964
+ * Return: 0 if all went fine, else return appropriate error.
1965
+ */
1966
+static int ti_sci_cmd_set_irq(const struct ti_sci_handle *handle, u16 src_id,
1967
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
1968
+{
1969
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
1970
+
1971
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, dst_id,
1972
+ dst_host_irq, 0, 0, 0, 0, 0);
1973
+}
1974
+
1975
+/**
1976
+ * ti_sci_cmd_set_event_map() - Configure an event based irq route between the
1977
+ * requested source and Interrupt Aggregator.
1978
+ * @handle: Pointer to TISCI handle.
1979
+ * @src_id: Device ID of the IRQ source
1980
+ * @src_index: IRQ source index within the source device
1981
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
1982
+ * @vint: Virtual interrupt to be used within the IA
1983
+ * @global_event: Global event number to be used for the requesting event
1984
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
1985
+ *
1986
+ * Return: 0 if all went fine, else return appropriate error.
1987
+ */
1988
+static int ti_sci_cmd_set_event_map(const struct ti_sci_handle *handle,
1989
+ u16 src_id, u16 src_index, u16 ia_id,
1990
+ u16 vint, u16 global_event,
1991
+ u8 vint_status_bit)
1992
+{
1993
+ u32 valid_params = MSG_FLAG_IA_ID_VALID | MSG_FLAG_VINT_VALID |
1994
+ MSG_FLAG_GLB_EVNT_VALID |
1995
+ MSG_FLAG_VINT_STS_BIT_VALID;
1996
+
1997
+ return ti_sci_set_irq(handle, valid_params, src_id, src_index, 0, 0,
1998
+ ia_id, vint, global_event, vint_status_bit, 0);
1999
+}
2000
+
2001
+/**
2002
+ * ti_sci_cmd_free_irq() - Free a host irq route between the between the
2003
+ * requested source and destination.
2004
+ * @handle: Pointer to TISCI handle.
2005
+ * @src_id: Device ID of the IRQ source
2006
+ * @src_index: IRQ source index within the source device
2007
+ * @dst_id: Device ID of the IRQ destination
2008
+ * @dst_host_irq: IRQ number of the destination device
2009
+ * @vint_irq: Boolean specifying if this interrupt belongs to
2010
+ * Interrupt Aggregator.
2011
+ *
2012
+ * Return: 0 if all went fine, else return appropriate error.
2013
+ */
2014
+static int ti_sci_cmd_free_irq(const struct ti_sci_handle *handle, u16 src_id,
2015
+ u16 src_index, u16 dst_id, u16 dst_host_irq)
2016
+{
2017
+ u32 valid_params = MSG_FLAG_DST_ID_VALID | MSG_FLAG_DST_HOST_IRQ_VALID;
2018
+
2019
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, dst_id,
2020
+ dst_host_irq, 0, 0, 0, 0, 0);
2021
+}
2022
+
2023
+/**
2024
+ * ti_sci_cmd_free_event_map() - Free an event map between the requested source
2025
+ * and Interrupt Aggregator.
2026
+ * @handle: Pointer to TISCI handle.
2027
+ * @src_id: Device ID of the IRQ source
2028
+ * @src_index: IRQ source index within the source device
2029
+ * @ia_id: Device ID of the IA, if the IRQ flows through this IA
2030
+ * @vint: Virtual interrupt to be used within the IA
2031
+ * @global_event: Global event number to be used for the requesting event
2032
+ * @vint_status_bit: Virtual interrupt status bit to be used for the event
2033
+ *
2034
+ * Return: 0 if all went fine, else return appropriate error.
2035
+ */
2036
+static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2037
+ u16 src_id, u16 src_index, u16 ia_id,
2038
+ u16 vint, u16 global_event,
2039
+ u8 vint_status_bit)
2040
+{
2041
+ u32 valid_params = MSG_FLAG_IA_ID_VALID |
2042
+ MSG_FLAG_VINT_VALID | MSG_FLAG_GLB_EVNT_VALID |
2043
+ MSG_FLAG_VINT_STS_BIT_VALID;
2044
+
2045
+ return ti_sci_free_irq(handle, valid_params, src_id, src_index, 0, 0,
2046
+ ia_id, vint, global_event, vint_status_bit, 0);
2047
+}
2048
+
2049
+/**
2050
+ * ti_sci_cmd_ring_config() - configure RA ring
2051
+ * @handle: Pointer to TI SCI handle.
2052
+ * @valid_params: Bitfield defining validity of ring configuration
2053
+ * parameters
2054
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is
2055
+ * allocated
2056
+ * @index: Ring index
2057
+ * @addr_lo: The ring base address lo 32 bits
2058
+ * @addr_hi: The ring base address hi 32 bits
2059
+ * @count: Number of ring elements
2060
+ * @mode: The mode of the ring
2061
+ * @size: The ring element size.
2062
+ * @order_id: Specifies the ring's bus order ID
2063
+ *
2064
+ * Return: 0 if all went well, else returns appropriate error value.
2065
+ *
2066
+ * See @ti_sci_msg_rm_ring_cfg_req for more info.
2067
+ */
2068
+static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2069
+ u32 valid_params, u16 nav_id, u16 index,
2070
+ u32 addr_lo, u32 addr_hi, u32 count,
2071
+ u8 mode, u8 size, u8 order_id)
2072
+{
2073
+ struct ti_sci_msg_rm_ring_cfg_req *req;
2074
+ struct ti_sci_msg_hdr *resp;
2075
+ struct ti_sci_xfer *xfer;
2076
+ struct ti_sci_info *info;
2077
+ struct device *dev;
2078
+ int ret = 0;
2079
+
2080
+ if (IS_ERR_OR_NULL(handle))
2081
+ return -EINVAL;
2082
+
2083
+ info = handle_to_ti_sci_info(handle);
2084
+ dev = info->dev;
2085
+
2086
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2087
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2088
+ sizeof(*req), sizeof(*resp));
2089
+ if (IS_ERR(xfer)) {
2090
+ ret = PTR_ERR(xfer);
2091
+ dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
2092
+ return ret;
2093
+ }
2094
+ req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2095
+ req->valid_params = valid_params;
2096
+ req->nav_id = nav_id;
2097
+ req->index = index;
2098
+ req->addr_lo = addr_lo;
2099
+ req->addr_hi = addr_hi;
2100
+ req->count = count;
2101
+ req->mode = mode;
2102
+ req->size = size;
2103
+ req->order_id = order_id;
2104
+
2105
+ ret = ti_sci_do_xfer(info, xfer);
2106
+ if (ret) {
2107
+ dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2108
+ goto fail;
2109
+ }
2110
+
2111
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2112
+ ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2113
+
2114
+fail:
2115
+ ti_sci_put_one_xfer(&info->minfo, xfer);
2116
+ dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2117
+ return ret;
2118
+}
2119
+
2120
+/**
2121
+ * ti_sci_cmd_ring_get_config() - get RA ring configuration
2122
+ * @handle: Pointer to TI SCI handle.
2123
+ * @nav_id: Device ID of Navigator Subsystem from which the ring is
2124
+ * allocated
2125
+ * @index: Ring index
2126
+ * @addr_lo: Returns ring's base address lo 32 bits
2127
+ * @addr_hi: Returns ring's base address hi 32 bits
2128
+ * @count: Returns number of ring elements
2129
+ * @mode: Returns mode of the ring
2130
+ * @size: Returns ring element size
2131
+ * @order_id: Returns ring's bus order ID
2132
+ *
2133
+ * Return: 0 if all went well, else returns appropriate error value.
2134
+ *
2135
+ * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
2136
+ */
2137
+static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2138
+ u32 nav_id, u32 index, u8 *mode,
2139
+ u32 *addr_lo, u32 *addr_hi,
2140
+ u32 *count, u8 *size, u8 *order_id)
2141
+{
2142
+ struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2143
+ struct ti_sci_msg_rm_ring_get_cfg_req *req;
2144
+ struct ti_sci_xfer *xfer;
2145
+ struct ti_sci_info *info;
2146
+ struct device *dev;
2147
+ int ret = 0;
2148
+
2149
+ if (IS_ERR_OR_NULL(handle))
2150
+ return -EINVAL;
2151
+
2152
+ info = handle_to_ti_sci_info(handle);
2153
+ dev = info->dev;
2154
+
2155
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2156
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2157
+ sizeof(*req), sizeof(*resp));
2158
+ if (IS_ERR(xfer)) {
2159
+ ret = PTR_ERR(xfer);
2160
+ dev_err(dev,
2161
+ "RM_RA:Message get config failed(%d)\n", ret);
2162
+ return ret;
2163
+ }
2164
+ req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
2165
+ req->nav_id = nav_id;
2166
+ req->index = index;
2167
+
2168
+ ret = ti_sci_do_xfer(info, xfer);
2169
+ if (ret) {
2170
+ dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
2171
+ goto fail;
2172
+ }
2173
+
2174
+ resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
2175
+
2176
+ if (!ti_sci_is_response_ack(resp)) {
2177
+ ret = -ENODEV;
2178
+ } else {
2179
+ if (mode)
2180
+ *mode = resp->mode;
2181
+ if (addr_lo)
2182
+ *addr_lo = resp->addr_lo;
2183
+ if (addr_hi)
2184
+ *addr_hi = resp->addr_hi;
2185
+ if (count)
2186
+ *count = resp->count;
2187
+ if (size)
2188
+ *size = resp->size;
2189
+ if (order_id)
2190
+ *order_id = resp->order_id;
2191
+ };
2192
+
2193
+fail:
2194
+ ti_sci_put_one_xfer(&info->minfo, xfer);
2195
+ dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2196
+ return ret;
2197
+}
2198
+
2199
/**
 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
 * @handle:	Pointer to TI SCI handle.
 * @nav_id:	Device ID of Navigator Subsystem which should be used for
 *		pairing
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread: Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
				   u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_psil_pair *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	/* Get a message slot and request an explicit ACK from firmware */
	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->src_thread = src_thread;
	req->dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	/* Response header is delivered in the same transfer buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
2254
+
2255
/**
 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
 * @handle:	Pointer to TI SCI handle.
 * @nav_id:	Device ID of Navigator Subsystem which should be used for
 *		unpairing
 * @src_thread:	Source PSI-L thread ID
 * @dst_thread: Destination PSI-L thread ID
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
				     u32 nav_id, u32 src_thread, u32 dst_thread)
{
	struct ti_sci_msg_psil_unpair *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	/* Get a message slot and request an explicit ACK from firmware */
	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
	req->nav_id = nav_id;
	req->src_thread = src_thread;
	req->dst_thread = dst_thread;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
		goto fail;
	}

	/* Response header is delivered in the same transfer buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
2310
+
2311
+/**
2312
+ * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2313
+ * @handle: Pointer to TI SCI handle.
2314
+ * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2315
+ * structure
2316
+ *
2317
+ * Return: 0 if all went well, else returns appropriate error value.
2318
+ *
2319
+ * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2320
+ * more info.
2321
+ */
2322
+static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2323
+ const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2324
+{
2325
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2326
+ struct ti_sci_msg_hdr *resp;
2327
+ struct ti_sci_xfer *xfer;
2328
+ struct ti_sci_info *info;
2329
+ struct device *dev;
2330
+ int ret = 0;
2331
+
2332
+ if (IS_ERR_OR_NULL(handle))
2333
+ return -EINVAL;
2334
+
2335
+ info = handle_to_ti_sci_info(handle);
2336
+ dev = info->dev;
2337
+
2338
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2339
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2340
+ sizeof(*req), sizeof(*resp));
2341
+ if (IS_ERR(xfer)) {
2342
+ ret = PTR_ERR(xfer);
2343
+ dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2344
+ return ret;
2345
+ }
2346
+ req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2347
+ req->valid_params = params->valid_params;
2348
+ req->nav_id = params->nav_id;
2349
+ req->index = params->index;
2350
+ req->tx_pause_on_err = params->tx_pause_on_err;
2351
+ req->tx_filt_einfo = params->tx_filt_einfo;
2352
+ req->tx_filt_pswords = params->tx_filt_pswords;
2353
+ req->tx_atype = params->tx_atype;
2354
+ req->tx_chan_type = params->tx_chan_type;
2355
+ req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2356
+ req->tx_fetch_size = params->tx_fetch_size;
2357
+ req->tx_credit_count = params->tx_credit_count;
2358
+ req->txcq_qnum = params->txcq_qnum;
2359
+ req->tx_priority = params->tx_priority;
2360
+ req->tx_qos = params->tx_qos;
2361
+ req->tx_orderid = params->tx_orderid;
2362
+ req->fdepth = params->fdepth;
2363
+ req->tx_sched_priority = params->tx_sched_priority;
2364
+ req->tx_burst_size = params->tx_burst_size;
2365
+
2366
+ ret = ti_sci_do_xfer(info, xfer);
2367
+ if (ret) {
2368
+ dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2369
+ goto fail;
2370
+ }
2371
+
2372
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2373
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2374
+
2375
+fail:
2376
+ ti_sci_put_one_xfer(&info->minfo, xfer);
2377
+ dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2378
+ return ret;
2379
+}
2380
+
2381
/**
 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
 * @handle:	Pointer to TI SCI handle.
 * @params:	Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
 *		structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 *
 * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
 * more info.
 */
static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	struct device *dev;
	int ret = 0;

	if (IS_ERR_OR_NULL(handle))
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}
	/* Copy the caller's config into the wire message, field by field */
	req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
	req->valid_params = params->valid_params;
	req->nav_id = params->nav_id;
	req->index = params->index;
	req->rx_fetch_size = params->rx_fetch_size;
	req->rxcq_qnum = params->rxcq_qnum;
	req->rx_priority = params->rx_priority;
	req->rx_qos = params->rx_qos;
	req->rx_orderid = params->rx_orderid;
	req->rx_sched_priority = params->rx_sched_priority;
	req->flowid_start = params->flowid_start;
	req->flowid_cnt = params->flowid_cnt;
	req->rx_pause_on_err = params->rx_pause_on_err;
	req->rx_atype = params->rx_atype;
	req->rx_chan_type = params->rx_chan_type;
	req->rx_ignore_short = params->rx_ignore_short;
	req->rx_ignore_long = params->rx_ignore_long;
	req->rx_burst_size = params->rx_burst_size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);
	dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}
2449
+
2450
+/**
2451
+ * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2452
+ * @handle: Pointer to TI SCI handle.
2453
+ * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2454
+ * structure
2455
+ *
2456
+ * Return: 0 if all went well, else returns appropriate error value.
2457
+ *
2458
+ * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2459
+ * more info.
2460
+ */
2461
+static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2462
+ const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2463
+{
2464
+ struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2465
+ struct ti_sci_msg_hdr *resp;
2466
+ struct ti_sci_xfer *xfer;
2467
+ struct ti_sci_info *info;
2468
+ struct device *dev;
2469
+ int ret = 0;
2470
+
2471
+ if (IS_ERR_OR_NULL(handle))
2472
+ return -EINVAL;
2473
+
2474
+ info = handle_to_ti_sci_info(handle);
2475
+ dev = info->dev;
2476
+
2477
+ xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2478
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2479
+ sizeof(*req), sizeof(*resp));
2480
+ if (IS_ERR(xfer)) {
2481
+ ret = PTR_ERR(xfer);
2482
+ dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2483
+ return ret;
2484
+ }
2485
+ req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2486
+ req->valid_params = params->valid_params;
2487
+ req->nav_id = params->nav_id;
2488
+ req->flow_index = params->flow_index;
2489
+ req->rx_einfo_present = params->rx_einfo_present;
2490
+ req->rx_psinfo_present = params->rx_psinfo_present;
2491
+ req->rx_error_handling = params->rx_error_handling;
2492
+ req->rx_desc_type = params->rx_desc_type;
2493
+ req->rx_sop_offset = params->rx_sop_offset;
2494
+ req->rx_dest_qnum = params->rx_dest_qnum;
2495
+ req->rx_src_tag_hi = params->rx_src_tag_hi;
2496
+ req->rx_src_tag_lo = params->rx_src_tag_lo;
2497
+ req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2498
+ req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2499
+ req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2500
+ req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2501
+ req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2502
+ req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2503
+ req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2504
+ req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2505
+ req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2506
+ req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2507
+ req->rx_ps_location = params->rx_ps_location;
2508
+
2509
+ ret = ti_sci_do_xfer(info, xfer);
2510
+ if (ret) {
2511
+ dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2512
+ goto fail;
2513
+ }
2514
+
2515
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2516
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2517
+
2518
+fail:
2519
+ ti_sci_put_one_xfer(&info->minfo, xfer);
2520
+ dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2521
+ return ret;
2522
+}
2523
+
2524
/**
 * ti_sci_cmd_proc_request() - Command to request a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
				   u8 proc_id)
{
	struct ti_sci_msg_req_proc_request *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	/*
	 * NOTE(review): response is read via tx_message.buf while other
	 * commands in this file use xfer->xfer_buf; presumably both alias
	 * the same buffer — confirm in ti_sci_setup_one_xfer().
	 */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
2575
+
2576
/**
 * ti_sci_cmd_proc_release() - Command to release a physical processor control
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
				   u8 proc_id)
{
	struct ti_sci_msg_req_proc_release *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	/*
	 * NOTE(review): response read via tx_message.buf (others use
	 * xfer_buf); presumably the same buffer — confirm in
	 * ti_sci_setup_one_xfer().
	 */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
2627
+
2628
/**
 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
 *				control to a host in the processor's access
 *				control list.
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @host_id:	Host ID to get the control of the processor
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
				    u8 proc_id, u8 host_id)
{
	struct ti_sci_msg_req_proc_handover *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->host_id = host_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	/*
	 * NOTE(review): response read via tx_message.buf (others use
	 * xfer_buf); presumably the same buffer — confirm in
	 * ti_sci_setup_one_xfer().
	 */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
2683
+
2684
/**
 * ti_sci_cmd_proc_set_config() - Command to set the processor boot
 *				    configuration flags
 * @handle:		Pointer to TI SCI handle
 * @proc_id:		Processor ID this request is for
 * @bootvector:		Processor boot vector (64-bit address, split into
 *			low/high 32-bit halves on the wire)
 * @config_flags_set:	Configuration flags to be set
 * @config_flags_clear:	Configuration flags to be cleared.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 bootvector,
				      u32 config_flags_set,
				      u32 config_flags_clear)
{
	struct ti_sci_msg_req_set_config *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
	req->processor_id = proc_id;
	/* Split the 64-bit boot vector into the two 32-bit wire fields */
	req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
	req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
				TI_SCI_ADDR_HIGH_SHIFT;
	req->config_flags_set = config_flags_set;
	req->config_flags_clear = config_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	/*
	 * NOTE(review): response read via tx_message.buf (others use
	 * xfer_buf); presumably the same buffer — confirm in
	 * ti_sci_setup_one_xfer().
	 */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
2745
+
2746
/**
 * ti_sci_cmd_proc_set_control() - Command to set the processor boot
 *				     control flags
 * @handle:			Pointer to TI SCI handle
 * @proc_id:			Processor ID this request is for
 * @control_flags_set:		Control flags to be set
 * @control_flags_clear:	Control flags to be cleared
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
				       u8 proc_id, u32 control_flags_set,
				       u32 control_flags_clear)
{
	struct ti_sci_msg_req_set_ctrl *req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
	req->processor_id = proc_id;
	req->control_flags_set = control_flags_set;
	req->control_flags_clear = control_flags_clear;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	/*
	 * NOTE(review): response read via tx_message.buf (others use
	 * xfer_buf); presumably the same buffer — confirm in
	 * ti_sci_setup_one_xfer().
	 */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
2803
+
2804
/**
 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
 * @handle:	Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @bv:		Returns the 64-bit boot vector (reassembled from the two
 *		32-bit wire fields)
 * @cfg_flags:	Returns the processor configuration flags
 * @ctrl_flags:	Returns the processor control flags
 * @sts_flags:	Returns the processor status flags
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
				      u8 proc_id, u64 *bv, u32 *cfg_flags,
				      u32 *ctrl_flags, u32 *sts_flags)
{
	struct ti_sci_msg_resp_get_status *resp;
	struct ti_sci_msg_req_get_status *req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	struct device *dev;
	int ret = 0;

	if (!handle)
		return -EINVAL;
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	info = handle_to_ti_sci_info(handle);
	dev = info->dev;

	xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
				   TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				   sizeof(*req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
	req->processor_id = proc_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(dev, "Mbox send fail %d\n", ret);
		goto fail;
	}

	/*
	 * NOTE(review): response read via tx_message.buf (others use
	 * xfer_buf); presumably the same buffer — confirm in
	 * ti_sci_setup_one_xfer().
	 */
	resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp)) {
		ret = -ENODEV;
	} else {
		/* Recombine low/high halves into the 64-bit boot vector */
		*bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
		      (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
		       TI_SCI_ADDR_HIGH_MASK);
		*cfg_flags = resp->config_flags;
		*ctrl_flags = resp->control_flags;
		*sts_flags = resp->status_flags;
	}

fail:
	ti_sci_put_one_xfer(&info->minfo, xfer);

	return ret;
}
2865
+
16182866 /*
16192867 * ti_sci_setup_ops() - Setup the operations structures
16202868 * @info: pointer to TISCI pointer
....@@ -1625,11 +2873,19 @@
16252873 struct ti_sci_core_ops *core_ops = &ops->core_ops;
16262874 struct ti_sci_dev_ops *dops = &ops->dev_ops;
16272875 struct ti_sci_clk_ops *cops = &ops->clk_ops;
2876
+ struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2877
+ struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
2878
+ struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2879
+ struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2880
+ struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2881
+ struct ti_sci_proc_ops *pops = &ops->proc_ops;
16282882
16292883 core_ops->reboot_device = ti_sci_cmd_core_reboot;
16302884
16312885 dops->get_device = ti_sci_cmd_get_device;
2886
+ dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
16322887 dops->idle_device = ti_sci_cmd_idle_device;
2888
+ dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
16332889 dops->put_device = ti_sci_cmd_put_device;
16342890
16352891 dops->is_valid = ti_sci_cmd_dev_is_valid;
....@@ -1655,6 +2911,32 @@
16552911 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
16562912 cops->set_freq = ti_sci_cmd_clk_set_freq;
16572913 cops->get_freq = ti_sci_cmd_clk_get_freq;
2914
+
2915
+ rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2916
+ rm_core_ops->get_range_from_shost =
2917
+ ti_sci_cmd_get_resource_range_from_shost;
2918
+
2919
+ iops->set_irq = ti_sci_cmd_set_irq;
2920
+ iops->set_event_map = ti_sci_cmd_set_event_map;
2921
+ iops->free_irq = ti_sci_cmd_free_irq;
2922
+ iops->free_event_map = ti_sci_cmd_free_event_map;
2923
+
2924
+ rops->config = ti_sci_cmd_ring_config;
2925
+ rops->get_config = ti_sci_cmd_ring_get_config;
2926
+
2927
+ psilops->pair = ti_sci_cmd_rm_psil_pair;
2928
+ psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2929
+
2930
+ udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2931
+ udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2932
+ udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2933
+
2934
+ pops->request = ti_sci_cmd_proc_request;
2935
+ pops->release = ti_sci_cmd_proc_release;
2936
+ pops->handover = ti_sci_cmd_proc_handover;
2937
+ pops->set_config = ti_sci_cmd_proc_set_config;
2938
+ pops->set_control = ti_sci_cmd_proc_set_control;
2939
+ pops->get_status = ti_sci_cmd_proc_get_status;
16582940 }
16592941
16602942 /**
....@@ -1779,6 +3061,272 @@
17793061 }
17803062 EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle);
17813063
3064
/**
 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
 * @np:		device node
 * @property:	property name containing phandle on TISCI node
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of TI SCI protocol library.
 * ti_sci_put_handle must be balanced with successful ti_sci_get_by_phandle
 * Return: pointer to handle if successful, else:
 * -EPROBE_DEFER if the instance is not ready
 * -ENODEV if the required node handler is missing
 * -EINVAL if invalid conditions are encountered.
 */
const struct ti_sci_handle *ti_sci_get_by_phandle(struct device_node *np,
						  const char *property)
{
	struct ti_sci_handle *handle = NULL;
	struct device_node *ti_sci_np;
	struct ti_sci_info *info;
	struct list_head *p;

	if (!np) {
		pr_err("I need a device pointer\n");
		return ERR_PTR(-EINVAL);
	}

	ti_sci_np = of_parse_phandle(np, property, 0);
	if (!ti_sci_np)
		return ERR_PTR(-ENODEV);

	/*
	 * Walk the list of registered TISCI instances looking for the one
	 * whose device node matches the phandle target; bump its refcount
	 * under the list mutex.
	 */
	mutex_lock(&ti_sci_list_mutex);
	list_for_each(p, &ti_sci_list) {
		info = list_entry(p, struct ti_sci_info, node);
		if (ti_sci_np == info->dev->of_node) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&ti_sci_list_mutex);
	of_node_put(ti_sci_np);

	/* Instance exists in DT but has not probed yet: defer the caller */
	if (!handle)
		return ERR_PTR(-EPROBE_DEFER);

	return handle;
}
EXPORT_SYMBOL_GPL(ti_sci_get_by_phandle);
3112
+
3113
/**
 * devm_ti_sci_get_by_phandle() - Managed get handle using phandle
 * @dev:	Device pointer requesting TISCI handle
 * @property:	property name containing phandle on TISCI node
 *
 * NOTE: This releases the handle once the device resources are
 * no longer needed. MUST NOT BE released with ti_sci_put_handle.
 * The function does not track individual clients of the framework
 * and is expected to be maintained by caller of TI SCI protocol library.
 *
 * Return: pointer to handle if successful, else corresponding error
 * pointer from ti_sci_get_by_phandle() (or -ENOMEM on devres failure).
 */
const struct ti_sci_handle *devm_ti_sci_get_by_phandle(struct device *dev,
						       const char *property)
{
	const struct ti_sci_handle *handle;
	const struct ti_sci_handle **ptr;

	ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);
	handle = ti_sci_get_by_phandle(dev_of_node(dev), property);

	/* Register the release action only if the handle was obtained */
	if (!IS_ERR(handle)) {
		*ptr = handle;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return handle;
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_by_phandle);
3146
+
3147
/**
 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
 * @res:	Pointer to the TISCI resource
 *
 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
 */
u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
{
	unsigned long flags;
	u16 set, free_bit;

	raw_spin_lock_irqsave(&res->lock, flags);
	for (set = 0; set < res->sets; set++) {
		/* First clear bit in this set's bitmap is a free resource */
		free_bit = find_first_zero_bit(res->desc[set].res_map,
					       res->desc[set].num);
		if (free_bit != res->desc[set].num) {
			/* Claim it before dropping the lock */
			set_bit(free_bit, res->desc[set].res_map);
			raw_spin_unlock_irqrestore(&res->lock, flags);
			return res->desc[set].start + free_bit;
		}
	}
	raw_spin_unlock_irqrestore(&res->lock, flags);

	/* All sets exhausted */
	return TI_SCI_RESOURCE_NULL;
}
EXPORT_SYMBOL_GPL(ti_sci_get_free_resource);
3173
+
3174
+/**
3175
+ * ti_sci_release_resource() - Release a resource from TISCI resource.
3176
+ * @res: Pointer to the TISCI resource
3177
+ * @id: Resource id to be released.
3178
+ */
3179
+void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
3180
+{
3181
+ unsigned long flags;
3182
+ u16 set;
3183
+
3184
+ raw_spin_lock_irqsave(&res->lock, flags);
3185
+ for (set = 0; set < res->sets; set++) {
3186
+ if (res->desc[set].start <= id &&
3187
+ (res->desc[set].num + res->desc[set].start) > id)
3188
+ clear_bit(id - res->desc[set].start,
3189
+ res->desc[set].res_map);
3190
+ }
3191
+ raw_spin_unlock_irqrestore(&res->lock, flags);
3192
+}
3193
+EXPORT_SYMBOL_GPL(ti_sci_release_resource);
3194
+
3195
+/**
3196
+ * ti_sci_get_num_resources() - Get the number of resources in TISCI resource
3197
+ * @res: Pointer to the TISCI resource
3198
+ *
3199
+ * Return: Total number of available resources.
3200
+ */
3201
+u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
3202
+{
3203
+ u32 set, count = 0;
3204
+
3205
+ for (set = 0; set < res->sets; set++)
3206
+ count += res->desc[set].num;
3207
+
3208
+ return count;
3209
+}
3210
+EXPORT_SYMBOL_GPL(ti_sci_get_num_resources);
3211
+
3212
+/**
3213
+ * devm_ti_sci_get_resource_sets() - Get a TISCI resources assigned to a device
3214
+ * @handle: TISCI handle
3215
+ * @dev: Device pointer to which the resource is assigned
3216
+ * @dev_id: TISCI device id to which the resource is assigned
3217
+ * @sub_types: Array of sub_types assigned corresponding to device
3218
+ * @sets: Number of sub_types
3219
+ *
3220
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
3221
+ * error pointer.
3222
+ */
3223
+static struct ti_sci_resource *
3224
+devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
3225
+ struct device *dev, u32 dev_id, u32 *sub_types,
3226
+ u32 sets)
3227
+{
3228
+ struct ti_sci_resource *res;
3229
+ bool valid_set = false;
3230
+ int i, ret;
3231
+
3232
+ res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3233
+ if (!res)
3234
+ return ERR_PTR(-ENOMEM);
3235
+
3236
+ res->sets = sets;
3237
+ res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3238
+ GFP_KERNEL);
3239
+ if (!res->desc)
3240
+ return ERR_PTR(-ENOMEM);
3241
+
3242
+ for (i = 0; i < res->sets; i++) {
3243
+ ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3244
+ sub_types[i],
3245
+ &res->desc[i].start,
3246
+ &res->desc[i].num);
3247
+ if (ret) {
3248
+ dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
3249
+ dev_id, sub_types[i]);
3250
+ res->desc[i].start = 0;
3251
+ res->desc[i].num = 0;
3252
+ continue;
3253
+ }
3254
+
3255
+ dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
3256
+ dev_id, sub_types[i], res->desc[i].start,
3257
+ res->desc[i].num);
3258
+
3259
+ valid_set = true;
3260
+ res->desc[i].res_map =
3261
+ devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
3262
+ sizeof(*res->desc[i].res_map), GFP_KERNEL);
3263
+ if (!res->desc[i].res_map)
3264
+ return ERR_PTR(-ENOMEM);
3265
+ }
3266
+ raw_spin_lock_init(&res->lock);
3267
+
3268
+ if (valid_set)
3269
+ return res;
3270
+
3271
+ return ERR_PTR(-EINVAL);
3272
+}
3273
+
3274
+/**
3275
+ * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3276
+ * @handle: TISCI handle
3277
+ * @dev: Device pointer to which the resource is assigned
3278
+ * @dev_id: TISCI device id to which the resource is assigned
3279
+ * @of_prop: property name by which the resource are represented
3280
+ *
3281
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
3282
+ * error pointer.
3283
+ */
3284
+struct ti_sci_resource *
3285
+devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3286
+ struct device *dev, u32 dev_id, char *of_prop)
3287
+{
3288
+ struct ti_sci_resource *res;
3289
+ u32 *sub_types;
3290
+ int sets;
3291
+
3292
+ sets = of_property_count_elems_of_size(dev_of_node(dev), of_prop,
3293
+ sizeof(u32));
3294
+ if (sets < 0) {
3295
+ dev_err(dev, "%s resource type ids not available\n", of_prop);
3296
+ return ERR_PTR(sets);
3297
+ }
3298
+
3299
+ sub_types = kcalloc(sets, sizeof(*sub_types), GFP_KERNEL);
3300
+ if (!sub_types)
3301
+ return ERR_PTR(-ENOMEM);
3302
+
3303
+ of_property_read_u32_array(dev_of_node(dev), of_prop, sub_types, sets);
3304
+ res = devm_ti_sci_get_resource_sets(handle, dev, dev_id, sub_types,
3305
+ sets);
3306
+
3307
+ kfree(sub_types);
3308
+ return res;
3309
+}
3310
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_of_resource);
3311
+
3312
+/**
3313
+ * devm_ti_sci_get_resource() - Get a resource range assigned to the device
3314
+ * @handle: TISCI handle
3315
+ * @dev: Device pointer to which the resource is assigned
3316
+ * @dev_id: TISCI device id to which the resource is assigned
3317
+ * @suub_type: TISCI resource subytpe representing the resource.
3318
+ *
3319
+ * Return: Pointer to ti_sci_resource if all went well else appropriate
3320
+ * error pointer.
3321
+ */
3322
+struct ti_sci_resource *
3323
+devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
3324
+ u32 dev_id, u32 sub_type)
3325
+{
3326
+ return devm_ti_sci_get_resource_sets(handle, dev, dev_id, &sub_type, 1);
3327
+}
3328
+EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
3329
+
17823330 static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
17833331 void *cmd)
17843332 {
....@@ -1793,7 +3341,7 @@
17933341
17943342 /* Description for K2G */
17953343 static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
1796
- .host_id = 2,
3344
+ .default_host_id = 2,
17973345 /* Conservative duration */
17983346 .max_rx_timeout_ms = 1000,
17993347 /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
....@@ -1801,8 +3349,19 @@
18013349 .max_msg_size = 64,
18023350 };
18033351
3352
+/* Description for AM654 */
3353
+static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3354
+ .default_host_id = 12,
3355
+ /* Conservative duration */
3356
+ .max_rx_timeout_ms = 10000,
3357
+ /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
3358
+ .max_msgs = 20,
3359
+ .max_msg_size = 60,
3360
+};
3361
+
/* Match table: selects the SoC-specific protocol description at probe time */
static const struct of_device_id ti_sci_of_match[] = {
	{.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc},
	{.compatible = "ti,am654-sci", .data = &ti_sci_pmmc_am654_desc},
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_of_match);
....@@ -1819,6 +3378,7 @@
18193378 int ret = -EINVAL;
18203379 int i;
18213380 int reboot = 0;
3381
+ u32 h_id;
18223382
18233383 of_id = of_match_device(ti_sci_of_match, dev);
18243384 if (!of_id) {
....@@ -1833,6 +3393,19 @@
18333393
18343394 info->dev = dev;
18353395 info->desc = desc;
3396
+ ret = of_property_read_u32(dev->of_node, "ti,host-id", &h_id);
3397
+ /* if the property is not present in DT, use a default from desc */
3398
+ if (ret < 0) {
3399
+ info->host_id = info->desc->default_host_id;
3400
+ } else {
3401
+ if (!h_id) {
3402
+ dev_warn(dev, "Host ID 0 is reserved for firmware\n");
3403
+ info->host_id = info->desc->default_host_id;
3404
+ } else {
3405
+ info->host_id = h_id;
3406
+ }
3407
+ }
3408
+
18363409 reboot = of_property_read_bool(dev->of_node,
18373410 "ti,system-reboot-controller");
18383411 INIT_LIST_HEAD(&info->node);