2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -20,20 +20,33 @@
  * OF THIS SOFTWARE.
  */
 
-#include <linux/kernel.h>
+#include <linux/bitfield.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
-#include <linux/i2c.h>
-#include <drm/drm_dp_mst_helper.h>
-#include <drm/drmP.h>
 #include <linux/iopoll.h>
 
-#include <drm/drm_fixed.h>
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stacktrace.h>
+#include <linux/sort.h>
+#include <linux/timekeeping.h>
+#include <linux/math64.h>
+#endif
+
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "drm_crtc_helper_internal.h"
+#include "drm_dp_mst_topology_internal.h"
 
 /**
  * DOC: dp mst helper
@@ -42,27 +55,144 @@
  * protocol. The helpers contain a topology manager and bandwidth manager.
  * The helpers encapsulate the sending and receiving of sideband msgs.
  */
+struct drm_dp_pending_up_req {
+        struct drm_dp_sideband_msg_hdr hdr;
+        struct drm_dp_sideband_msg_req_body msg;
+        struct list_head next;
+};
+
 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
                                   char *buf);
-static int test_calc_pbn_mode(void);
 
-static void drm_dp_put_port(struct drm_dp_mst_port *port);
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
 
 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
                                      int id,
                                      struct drm_dp_payload *payload);
 
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
-                                     struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+                                 struct drm_dp_mst_port *port,
+                                 int offset, int size, u8 *bytes);
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+                                  struct drm_dp_mst_port *port,
+                                  int offset, int size, u8 *bytes);
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+                                    struct drm_dp_mst_branch *mstb);
+
+static void
+drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
+                                   struct drm_dp_mst_branch *mstb);
+
 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                            struct drm_dp_mst_branch *mstb,
                                            struct drm_dp_mst_port *port);
 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
                                  u8 *guid);
 
-static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
-static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
+                                                 struct drm_dp_mst_branch *branch);
+
+#define DBG_PREFIX "[dp_mst]"
+
+#define DP_STR(x) [DP_ ## x] = #x
+
+static const char *drm_dp_mst_req_type_str(u8 req_type)
+{
+        static const char * const req_type_str[] = {
+                DP_STR(GET_MSG_TRANSACTION_VERSION),
+                DP_STR(LINK_ADDRESS),
+                DP_STR(CONNECTION_STATUS_NOTIFY),
+                DP_STR(ENUM_PATH_RESOURCES),
+                DP_STR(ALLOCATE_PAYLOAD),
+                DP_STR(QUERY_PAYLOAD),
+                DP_STR(RESOURCE_STATUS_NOTIFY),
+                DP_STR(CLEAR_PAYLOAD_ID_TABLE),
+                DP_STR(REMOTE_DPCD_READ),
+                DP_STR(REMOTE_DPCD_WRITE),
+                DP_STR(REMOTE_I2C_READ),
+                DP_STR(REMOTE_I2C_WRITE),
+                DP_STR(POWER_UP_PHY),
+                DP_STR(POWER_DOWN_PHY),
+                DP_STR(SINK_EVENT_NOTIFY),
+                DP_STR(QUERY_STREAM_ENC_STATUS),
+        };
+
+        if (req_type >= ARRAY_SIZE(req_type_str) ||
+            !req_type_str[req_type])
+                return "unknown";
+
+        return req_type_str[req_type];
+}
+
+#undef DP_STR
+#define DP_STR(x) [DP_NAK_ ## x] = #x
+
+static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
+{
+        static const char * const nak_reason_str[] = {
+                DP_STR(WRITE_FAILURE),
+                DP_STR(INVALID_READ),
+                DP_STR(CRC_FAILURE),
+                DP_STR(BAD_PARAM),
+                DP_STR(DEFER),
+                DP_STR(LINK_FAILURE),
+                DP_STR(NO_RESOURCES),
+                DP_STR(DPCD_FAIL),
+                DP_STR(I2C_NAK),
+                DP_STR(ALLOCATE_FAIL),
+        };
+
+        if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
+            !nak_reason_str[nak_reason])
+                return "unknown";
+
+        return nak_reason_str[nak_reason];
+}
+
+#undef DP_STR
+#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
+
+static const char *drm_dp_mst_sideband_tx_state_str(int state)
+{
+        static const char * const sideband_reason_str[] = {
+                DP_STR(QUEUED),
+                DP_STR(START_SEND),
+                DP_STR(SENT),
+                DP_STR(RX),
+                DP_STR(TIMEOUT),
+        };
+
+        if (state >= ARRAY_SIZE(sideband_reason_str) ||
+            !sideband_reason_str[state])
+                return "unknown";
+
+        return sideband_reason_str[state];
+}
+
+static int
+drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
+{
+        int i;
+        u8 unpacked_rad[16];
+
+        for (i = 0; i < lct; i++) {
+                if (i % 2)
+                        unpacked_rad[i] = rad[i / 2] >> 4;
+                else
+                        unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
+        }
+
+        /* TODO: Eventually add something to printk so we can format the rad
+         * like this: 1.2.3
+         */
+        return snprintf(out, len, "%*phC", lct, unpacked_rad);
+}
+
 /* sideband msg handling */
 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
 {
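
drm_dp_mst_rad_to_str() above prints the relative address (RAD) as raw hex because printk has no dotted-path format yet. Below is a standalone sketch of the "1.2.3" rendering the TODO comment asks for — plain C with invented port numbers, and masking the low nibble with 0xf rather than mirroring the kernel helper exactly:

        #include <stdio.h>

        /* Unpack a nibble-packed RAD into a dotted string (sketch only). */
        static void rad_to_dotted(const unsigned char *rad, int lct,
                                  char *out, size_t len)
        {
                size_t pos = 0;
                int i;

                for (i = 0; i < lct && pos + 3 < len; i++) {
                        unsigned char nibble = (i % 2) ? (rad[i / 2] >> 4)
                                                       : (rad[i / 2] & 0xf);
                        pos += snprintf(out + pos, len - pos, "%s%u",
                                        i ? "." : "", nibble);
                }
        }

        int main(void)
        {
                unsigned char rad[8] = { 0x21, 0x03 }; /* hops 1, 2, 3 */
                char buf[32];

                rad_to_dotted(rad, 3, buf, sizeof(buf));
                printf("%s\n", buf); /* prints "1.2.3" */
                return 0;
        }
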
@@ -134,6 +264,7 @@
 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
 {
         u8 size = 3;
+
         size += (hdr->lct / 2);
         return size;
 }
@@ -144,6 +275,7 @@
         int idx = 0;
         int i;
         u8 crc4;
+
         buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
         for (i = 0; i < (hdr->lct / 2); i++)
                 buf[idx++] = hdr->rad[i];
@@ -164,6 +296,7 @@
         u8 len;
         int i;
         u8 idx;
+
         if (buf[0] == 0)
                 return false;
         len = 3;
@@ -194,16 +327,20 @@
         return true;
 }
 
-static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
-                                       struct drm_dp_sideband_msg_tx *raw)
+void
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
+                           struct drm_dp_sideband_msg_tx *raw)
 {
         int idx = 0;
         int i;
         u8 *buf = raw->msg;
+
         buf[idx++] = req->req_type & 0x7f;
 
         switch (req->req_type) {
         case DP_ENUM_PATH_RESOURCES:
+        case DP_POWER_DOWN_PHY:
+        case DP_POWER_UP_PHY:
                 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
                 idx++;
                 break;
@@ -291,19 +428,301 @@
                 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
                 idx += req->u.i2c_write.num_bytes;
                 break;
+        case DP_QUERY_STREAM_ENC_STATUS: {
+                const struct drm_dp_query_stream_enc_status *msg;
 
-        case DP_POWER_DOWN_PHY:
-        case DP_POWER_UP_PHY:
-                buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+                msg = &req->u.enc_status;
+                buf[idx] = msg->stream_id;
                 idx++;
+                memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
+                idx += sizeof(msg->client_id);
+                buf[idx] = 0;
+                buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
+                buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
+                buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
+                buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
+                idx++;
+                }
                 break;
         }
         raw->cur_len = idx;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
+
+/* Decode a sideband request we've encoded, mainly used for debugging */
+int
+drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
+                           struct drm_dp_sideband_msg_req_body *req)
+{
+        const u8 *buf = raw->msg;
+        int i, idx = 0;
+
+        req->req_type = buf[idx++] & 0x7f;
+        switch (req->req_type) {
+        case DP_ENUM_PATH_RESOURCES:
+        case DP_POWER_DOWN_PHY:
+        case DP_POWER_UP_PHY:
+                req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
+                break;
+        case DP_ALLOCATE_PAYLOAD:
+                {
+                        struct drm_dp_allocate_payload *a =
+                                &req->u.allocate_payload;
+
+                        a->number_sdp_streams = buf[idx] & 0xf;
+                        a->port_number = (buf[idx] >> 4) & 0xf;
+
+                        WARN_ON(buf[++idx] & 0x80);
+                        a->vcpi = buf[idx] & 0x7f;
+
+                        a->pbn = buf[++idx] << 8;
+                        a->pbn |= buf[++idx];
+
+                        idx++;
+                        for (i = 0; i < a->number_sdp_streams; i++) {
+                                a->sdp_stream_sink[i] =
+                                        (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
+                        }
+                }
+                break;
+        case DP_QUERY_PAYLOAD:
+                req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
+                WARN_ON(buf[++idx] & 0x80);
+                req->u.query_payload.vcpi = buf[idx] & 0x7f;
+                break;
+        case DP_REMOTE_DPCD_READ:
+                {
+                        struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
+
+                        r->port_number = (buf[idx] >> 4) & 0xf;
+
+                        r->dpcd_address = (buf[idx] << 16) & 0xf0000;
+                        r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+                        r->dpcd_address |= buf[++idx] & 0xff;
+
+                        r->num_bytes = buf[++idx];
+                }
+                break;
+        case DP_REMOTE_DPCD_WRITE:
+                {
+                        struct drm_dp_remote_dpcd_write *w =
+                                &req->u.dpcd_write;
+
+                        w->port_number = (buf[idx] >> 4) & 0xf;
+
+                        w->dpcd_address = (buf[idx] << 16) & 0xf0000;
+                        w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
+                        w->dpcd_address |= buf[++idx] & 0xff;
+
+                        w->num_bytes = buf[++idx];
+
+                        w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+                                           GFP_KERNEL);
+                        if (!w->bytes)
+                                return -ENOMEM;
+                }
+                break;
+        case DP_REMOTE_I2C_READ:
+                {
+                        struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
+                        struct drm_dp_remote_i2c_read_tx *tx;
+                        bool failed = false;
+
+                        r->num_transactions = buf[idx] & 0x3;
+                        r->port_number = (buf[idx] >> 4) & 0xf;
+                        for (i = 0; i < r->num_transactions; i++) {
+                                tx = &r->transactions[i];
+
+                                tx->i2c_dev_id = buf[++idx] & 0x7f;
+                                tx->num_bytes = buf[++idx];
+                                tx->bytes = kmemdup(&buf[++idx],
+                                                    tx->num_bytes,
+                                                    GFP_KERNEL);
+                                if (!tx->bytes) {
+                                        failed = true;
+                                        break;
+                                }
+                                idx += tx->num_bytes;
+                                tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
+                                tx->i2c_transaction_delay = buf[idx] & 0xf;
+                        }
+
+                        if (failed) {
+                                for (i = 0; i < r->num_transactions; i++) {
+                                        tx = &r->transactions[i];
+                                        kfree(tx->bytes);
+                                }
+                                return -ENOMEM;
+                        }
+
+                        r->read_i2c_device_id = buf[++idx] & 0x7f;
+                        r->num_bytes_read = buf[++idx];
+                }
+                break;
+        case DP_REMOTE_I2C_WRITE:
+                {
+                        struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
+
+                        w->port_number = (buf[idx] >> 4) & 0xf;
+                        w->write_i2c_device_id = buf[++idx] & 0x7f;
+                        w->num_bytes = buf[++idx];
+                        w->bytes = kmemdup(&buf[++idx], w->num_bytes,
+                                           GFP_KERNEL);
+                        if (!w->bytes)
+                                return -ENOMEM;
+                }
+                break;
+        case DP_QUERY_STREAM_ENC_STATUS:
+                req->u.enc_status.stream_id = buf[idx++];
+                for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
+                        req->u.enc_status.client_id[i] = buf[idx++];
+
+                req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
+                                                           buf[idx]);
+                req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
+                                                                 buf[idx]);
+                req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
+                                                              buf[idx]);
+                req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
+                                                                    buf[idx]);
+                break;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
+
+void
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
+                                  int indent, struct drm_printer *printer)
+{
+        int i;
+
+#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
+        if (req->req_type == DP_LINK_ADDRESS) {
+                /* No contents to print */
+                P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
+                return;
+        }
+
+        P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
+        indent++;
+
+        switch (req->req_type) {
+        case DP_ENUM_PATH_RESOURCES:
+        case DP_POWER_DOWN_PHY:
+        case DP_POWER_UP_PHY:
+                P("port=%d\n", req->u.port_num.port_number);
+                break;
+        case DP_ALLOCATE_PAYLOAD:
+                P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
+                  req->u.allocate_payload.port_number,
+                  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
+                  req->u.allocate_payload.number_sdp_streams,
+                  req->u.allocate_payload.number_sdp_streams,
+                  req->u.allocate_payload.sdp_stream_sink);
+                break;
+        case DP_QUERY_PAYLOAD:
+                P("port=%d vcpi=%d\n",
+                  req->u.query_payload.port_number,
+                  req->u.query_payload.vcpi);
+                break;
+        case DP_REMOTE_DPCD_READ:
+                P("port=%d dpcd_addr=%05x len=%d\n",
+                  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
+                  req->u.dpcd_read.num_bytes);
+                break;
+        case DP_REMOTE_DPCD_WRITE:
+                P("port=%d addr=%05x len=%d: %*ph\n",
+                  req->u.dpcd_write.port_number,
+                  req->u.dpcd_write.dpcd_address,
+                  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
+                  req->u.dpcd_write.bytes);
+                break;
+        case DP_REMOTE_I2C_READ:
+                P("port=%d num_tx=%d id=%d size=%d:\n",
+                  req->u.i2c_read.port_number,
+                  req->u.i2c_read.num_transactions,
+                  req->u.i2c_read.read_i2c_device_id,
+                  req->u.i2c_read.num_bytes_read);
+
+                indent++;
+                for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
+                        const struct drm_dp_remote_i2c_read_tx *rtx =
+                                &req->u.i2c_read.transactions[i];
+
+                        P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
+                          i, rtx->i2c_dev_id, rtx->num_bytes,
+                          rtx->no_stop_bit, rtx->i2c_transaction_delay,
+                          rtx->num_bytes, rtx->bytes);
+                }
+                break;
+        case DP_REMOTE_I2C_WRITE:
+                P("port=%d id=%d size=%d: %*ph\n",
+                  req->u.i2c_write.port_number,
+                  req->u.i2c_write.write_i2c_device_id,
+                  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
+                  req->u.i2c_write.bytes);
+                break;
+        case DP_QUERY_STREAM_ENC_STATUS:
+                P("stream_id=%u client_id=%*ph stream_event=%x "
+                  "valid_event=%d stream_behavior=%x valid_behavior=%d",
+                  req->u.enc_status.stream_id,
+                  (int)ARRAY_SIZE(req->u.enc_status.client_id),
+                  req->u.enc_status.client_id, req->u.enc_status.stream_event,
+                  req->u.enc_status.valid_stream_event,
+                  req->u.enc_status.stream_behavior,
+                  req->u.enc_status.valid_stream_behavior);
+                break;
+        default:
+                P("???\n");
+                break;
+        }
+#undef P
+}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
+
+static inline void
+drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
+                                const struct drm_dp_sideband_msg_tx *txmsg)
+{
+        struct drm_dp_sideband_msg_req_body req;
+        char buf[64];
+        int ret;
+        int i;
+
+        drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
+                              sizeof(buf));
+        drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
+                   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
+                   drm_dp_mst_sideband_tx_state_str(txmsg->state),
+                   txmsg->path_msg, buf);
+
+        ret = drm_dp_decode_sideband_req(txmsg, &req);
+        if (ret) {
+                drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
+                return;
+        }
+        drm_dp_dump_sideband_msg_req_body(&req, 1, p);
+
+        switch (req.req_type) {
+        case DP_REMOTE_DPCD_WRITE:
+                kfree(req.u.dpcd_write.bytes);
+                break;
+        case DP_REMOTE_I2C_READ:
+                for (i = 0; i < req.u.i2c_read.num_transactions; i++)
+                        kfree(req.u.i2c_read.transactions[i].bytes);
+                break;
+        case DP_REMOTE_I2C_WRITE:
+                kfree(req.u.i2c_write.bytes);
+                break;
+        }
 }
 
 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
 {
         u8 crc4;
+
         crc4 = drm_dp_msg_data_crc4(msg, len);
         msg[len] = crc4;
 }
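
The two EXPORT_SYMBOL_FOR_TESTS_ONLY() helpers above are designed to round-trip. A hypothetical self-test-style check, not part of this patch:

        /* Encode a request, decode it back, and compare one field. */
        struct drm_dp_sideband_msg_req_body in = {
                .req_type = DP_ENUM_PATH_RESOURCES,
                .u.port_num.port_number = 5,
        }, out;
        struct drm_dp_sideband_msg_tx txmsg = {};

        drm_dp_encode_sideband_req(&in, &txmsg);
        if (drm_dp_decode_sideband_req(&txmsg, &out) < 0 ||
            out.u.port_num.port_number != in.u.port_num.port_number)
                pr_err("sideband encode/decode mismatch\n");
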
@@ -319,55 +738,53 @@
         raw->cur_len = idx;
 }
 
-/* this adds a chunk of msg to the builder to get the final msg */
-static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
-                                      u8 *replybuf, u8 replybuflen, bool hdr)
+static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
+                                          struct drm_dp_sideband_msg_hdr *hdr,
+                                          u8 hdrlen)
 {
-        int ret;
+        /*
+         * ignore out-of-order messages or messages that are part of a
+         * failed transaction
+         */
+        if (!hdr->somt && !msg->have_somt)
+                return false;
+
+        /* get length contained in this portion */
+        msg->curchunk_idx = 0;
+        msg->curchunk_len = hdr->msg_len;
+        msg->curchunk_hdrlen = hdrlen;
+
+        /* we have already gotten an somt - don't bother parsing */
+        if (hdr->somt && msg->have_somt)
+                return false;
+
+        if (hdr->somt) {
+                memcpy(&msg->initial_hdr, hdr,
+                       sizeof(struct drm_dp_sideband_msg_hdr));
+                msg->have_somt = true;
+        }
+        if (hdr->eomt)
+                msg->have_eomt = true;
+
+        return true;
+}
+
+/* this adds a chunk of msg to the builder to get the final msg */
+static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
+                                           u8 *replybuf, u8 replybuflen)
+{
         u8 crc4;
 
-        if (hdr) {
-                u8 hdrlen;
-                struct drm_dp_sideband_msg_hdr recv_hdr;
-                ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
-                if (ret == false) {
-                        print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
-                        return false;
-                }
-
-                /*
-                 * ignore out-of-order messages or messages that are part of a
-                 * failed transaction
-                 */
-                if (!recv_hdr.somt && !msg->have_somt)
-                        return false;
-
-                /* get length contained in this portion */
-                msg->curchunk_len = recv_hdr.msg_len;
-                msg->curchunk_hdrlen = hdrlen;
-
-                /* we have already gotten an somt - don't bother parsing */
-                if (recv_hdr.somt && msg->have_somt)
-                        return false;
-
-                if (recv_hdr.somt) {
-                        memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
-                        msg->have_somt = true;
-                }
-                if (recv_hdr.eomt)
-                        msg->have_eomt = true;
-
-                /* copy the bytes for the remainder of this header chunk */
-                msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
-                memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
-        } else {
-                memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
-                msg->curchunk_idx += replybuflen;
-        }
+        memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+        msg->curchunk_idx += replybuflen;
 
         if (msg->curchunk_idx >= msg->curchunk_len) {
                 /* do CRC */
                 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+                if (crc4 != msg->chunk[msg->curchunk_len - 1])
+                        print_hex_dump(KERN_DEBUG, "wrong crc",
+                                       DUMP_PREFIX_NONE, 16, 1,
+                                       msg->chunk, msg->curchunk_len, false);
                 /* copy chunk into bigger msg */
                 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
                 msg->curlen += msg->curchunk_len - 1;
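
With the old drm_dp_sideband_msg_build() split in two, a receive path is expected to decode the header once per transfer and then feed payload bytes to the builder. An illustrative caller sketch — the variable names replybuf, replylen and msg are assumptions, not code from this patch:

        struct drm_dp_sideband_msg_hdr hdr;
        u8 hdrlen;

        /* Decode the sideband header at the start of the transfer. */
        if (!drm_dp_decode_sideband_msg_hdr(&hdr, replybuf, replylen, &hdrlen))
                return;         /* bad header, drop the chunk */
        if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen))
                return;         /* out of order, or duplicate SOMT */

        /* Everything after the header in this transfer is payload. */
        drm_dp_sideband_append_payload(msg, replybuf + hdrlen,
                                       replylen - hdrlen);
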
@@ -380,6 +797,7 @@
 {
         int idx = 1;
         int i;
+
         memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
         idx += 16;
         repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
@@ -431,6 +849,7 @@
                                            struct drm_dp_sideband_msg_reply_body *repmsg)
 {
         int idx = 1;
+
         repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
         idx++;
         if (idx > raw->curlen)
@@ -451,6 +870,7 @@
                                               struct drm_dp_sideband_msg_reply_body *repmsg)
 {
         int idx = 1;
+
         repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
         idx++;
         if (idx > raw->curlen)
@@ -484,6 +904,7 @@
                                                     struct drm_dp_sideband_msg_reply_body *repmsg)
 {
         int idx = 1;
+
         repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
         repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
         idx++;
@@ -507,6 +928,7 @@
                                                   struct drm_dp_sideband_msg_reply_body *repmsg)
 {
         int idx = 1;
+
         repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
         idx++;
         if (idx > raw->curlen)
@@ -529,6 +951,7 @@
                                                struct drm_dp_sideband_msg_reply_body *repmsg)
 {
         int idx = 1;
+
         repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
         idx++;
         if (idx > raw->curlen)
@@ -558,6 +981,42 @@
         return true;
 }
 
+static bool
+drm_dp_sideband_parse_query_stream_enc_status(
+                                struct drm_dp_sideband_msg_rx *raw,
+                                struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+        struct drm_dp_query_stream_enc_status_ack_reply *reply;
+
+        reply = &repmsg->u.enc_status;
+
+        reply->stream_id = raw->msg[3];
+
+        reply->reply_signed = raw->msg[2] & BIT(0);
+
+        /*
+         * NOTE: It's my impression from reading the spec that the below
+         * parsing is correct. However I noticed while testing with an HDCP
+         * 1.4 display through an HDCP 2.2 hub that only bit 3 was set. In
+         * that case, I would expect both bits to be set. So keep the parsing
+         * following the spec, but beware reality might not match the spec
+         * (at least for some configurations).
+         */
+        reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
+        reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
+
+        reply->query_capable_device_present = raw->msg[2] & BIT(5);
+        reply->legacy_device_present = raw->msg[2] & BIT(6);
+        reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
+
+        reply->auth_completed = !!(raw->msg[1] & BIT(3));
+        reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
+        reply->repeater_present = !!(raw->msg[1] & BIT(5));
+        reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
+
+        return true;
+}
+
 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
                                         struct drm_dp_sideband_msg_reply_body *msg)
 {
@@ -565,7 +1024,7 @@
         msg->reply_type = (raw->msg[0] & 0x80) >> 7;
         msg->req_type = (raw->msg[0] & 0x7f);
 
-        if (msg->reply_type) {
+        if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
                 memcpy(msg->u.nak.guid, &raw->msg[1], 16);
                 msg->u.nak.reason = raw->msg[17];
                 msg->u.nak.nak_data = raw->msg[18];
@@ -583,6 +1042,8 @@
                 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
         case DP_REMOTE_I2C_READ:
                 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
+        case DP_REMOTE_I2C_WRITE:
+                return true; /* since there's nothing to parse */
         case DP_ENUM_PATH_RESOURCES:
                 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
         case DP_ALLOCATE_PAYLOAD:
@@ -590,8 +1051,13 @@
         case DP_POWER_DOWN_PHY:
         case DP_POWER_UP_PHY:
                 return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
+        case DP_CLEAR_PAYLOAD_ID_TABLE:
+                return true; /* since there's nothing to parse */
+        case DP_QUERY_STREAM_ENC_STATUS:
+                return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
         default:
-                DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
+                DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
+                          drm_dp_mst_req_type_str(msg->req_type));
                 return false;
         }
 }
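
For reference, the first byte of every reply packs the ACK/NAK flag and the request type, which is what the parser above switches on. A worked example with invented values:

        /*
         * Suppose raw->msg[0] == 0x90:
         *   bit 7 set  -> reply_type = 1 (DP_SIDEBAND_REPLY_NAK)
         *   bits 6:0   -> req_type   = 0x10 (DP_ENUM_PATH_RESOURCES)
         * A NAK reply then carries the 16-byte GUID at msg[1..16],
         * the reason code at msg[17] and NAK data at msg[18].
         */
        u8 first = 0x90;
        u8 reply_type = (first & 0x80) >> 7;    /* 1: NAK */
        u8 req_type = first & 0x7f;             /* 0x10 */
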
@@ -658,12 +1124,14 @@
         case DP_RESOURCE_STATUS_NOTIFY:
                 return drm_dp_sideband_parse_resource_status_notify(raw, msg);
         default:
-                DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
+                DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
+                          drm_dp_mst_req_type_str(msg->req_type));
                 return false;
         }
 }
 
-static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
+                             u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
 {
         struct drm_dp_sideband_msg_req_body req;
 
@@ -673,20 +1141,27 @@
         req.u.dpcd_write.num_bytes = num_bytes;
         req.u.dpcd_write.bytes = bytes;
         drm_dp_encode_sideband_req(&req, msg);
-
-        return 0;
 }
 
-static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
 {
         struct drm_dp_sideband_msg_req_body req;
 
         req.req_type = DP_LINK_ADDRESS;
         drm_dp_encode_sideband_req(&req, msg);
-        return 0;
 }
 
-static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+{
+        struct drm_dp_sideband_msg_req_body req;
+
+        req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
+        drm_dp_encode_sideband_req(&req, msg);
+        msg->path_msg = true;
+}
+
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
+                                     int port_num)
 {
         struct drm_dp_sideband_msg_req_body req;
 
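
build_clear_payload_id_table() sets msg->path_msg, marking the request as a path message that is relayed along every hop of the path rather than handled only by the addressed branch. A hedged sketch of a sender built on top of it — drm_dp_queue_down_tx() is this file's existing queueing helper, which is not shown in this diff:

        struct drm_dp_sideband_msg_tx *txmsg;

        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg)
                return;

        txmsg->dst = mstb;
        build_clear_payload_id_table(txmsg);
        drm_dp_queue_down_tx(mgr, txmsg);
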
@@ -697,12 +1172,14 @@
         return 0;
 }
 
-static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
-                                  u8 vcpi, uint16_t pbn,
-                                  u8 number_sdp_streams,
-                                  u8 *sdp_stream_sink)
+static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
+                                   int port_num,
+                                   u8 vcpi, uint16_t pbn,
+                                   u8 number_sdp_streams,
+                                   u8 *sdp_stream_sink)
 {
         struct drm_dp_sideband_msg_req_body req;
+
         memset(&req, 0, sizeof(req));
         req.req_type = DP_ALLOCATE_PAYLOAD;
         req.u.allocate_payload.port_number = port_num;
@@ -713,11 +1190,10 @@
                number_sdp_streams);
         drm_dp_encode_sideband_req(&req, msg);
         msg->path_msg = true;
-        return 0;
 }
 
-static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
-                                  int port_num, bool power_up)
+static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+                                   int port_num, bool power_up)
 {
         struct drm_dp_sideband_msg_req_body req;
 
@@ -729,6 +1205,24 @@
         req.u.port_num.port_number = port_num;
         drm_dp_encode_sideband_req(&req, msg);
         msg->path_msg = true;
+}
+
+static int
+build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
+                              u8 *q_id)
+{
+        struct drm_dp_sideband_msg_req_body req;
+
+        req.req_type = DP_QUERY_STREAM_ENC_STATUS;
+        req.u.enc_status.stream_id = stream_id;
+        memcpy(req.u.enc_status.client_id, q_id,
+               sizeof(req.u.enc_status.client_id));
+        req.u.enc_status.stream_event = 0;
+        req.u.enc_status.valid_stream_event = false;
+        req.u.enc_status.stream_behavior = 0;
+        req.u.enc_status.valid_stream_behavior = false;
+
+        drm_dp_encode_sideband_req(&req, msg);
         return 0;
 }
 
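
A sketch of driving build_query_stream_enc_status(): the client id is a nonce, which is why <linux/random.h> is added at the top of this patch. The stream_id value and the queueing call are assumed from elsewhere in the file:

        struct drm_dp_sideband_msg_tx *txmsg;
        u8 nonce[7];    /* matches sizeof(req.u.enc_status.client_id) */

        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg)
                return -ENOMEM;

        get_random_bytes(nonce, sizeof(nonce));

        txmsg->dst = mgr->mst_primary;
        build_query_stream_enc_status(txmsg, stream_id, nonce);
        drm_dp_queue_down_tx(mgr, txmsg);
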
@@ -765,6 +1259,7 @@
                                     int vcpi)
 {
         int i;
+
         if (vcpi == 0)
                 return;
 
@@ -773,11 +1268,11 @@
         clear_bit(vcpi - 1, &mgr->vcpi_mask);
 
         for (i = 0; i < mgr->max_payloads; i++) {
-                if (mgr->proposed_vcpis[i])
-                        if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
-                                mgr->proposed_vcpis[i] = NULL;
-                                clear_bit(i + 1, &mgr->payload_mask);
-                        }
+                if (mgr->proposed_vcpis[i] &&
+                    mgr->proposed_vcpis[i]->vcpi == vcpi) {
+                        mgr->proposed_vcpis[i] = NULL;
+                        clear_bit(i + 1, &mgr->payload_mask);
+                }
         }
         mutex_unlock(&mgr->payload_lock);
 }
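
The bit indexing above is easy to misread: VCPI n lives at bit n - 1 of vcpi_mask, while payload slot i is marked at bit i + 1 of payload_mask (an id of 0 means unallocated). A toy illustration, not kernel code:

        unsigned long vcpi_mask = 0, payload_mask = 0;
        int vcpi = 3, slot = 2;

        vcpi_mask |= 1UL << (vcpi - 1);         /* vcpi_mask   == 0b0100 */
        payload_mask |= 1UL << (slot + 1);      /* payload_mask == 0b1000 */
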
@@ -801,12 +1296,38 @@
                                       struct drm_dp_sideband_msg_tx *txmsg)
 {
         struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+        unsigned long wait_timeout = msecs_to_jiffies(4000);
+        unsigned long wait_expires = jiffies + wait_timeout;
         int ret;
 
-        ret = wait_event_timeout(mgr->tx_waitq,
-                                 check_txmsg_state(mgr, txmsg),
-                                 (4 * HZ));
-        mutex_lock(&mstb->mgr->qlock);
+        for (;;) {
+                /*
+                 * If the driver provides a way for this, change to
+                 * poll-waiting for the MST reply interrupt if we didn't receive
+                 * it for 50 msec. This would cater for cases where the HPD
+                 * pulse signal got lost somewhere, even though the sink raised
+                 * the corresponding MST interrupt correctly. One example is the
+                 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
+                 * filters out short pulses with a duration less than ~540 usec.
+                 *
+                 * The poll period is 50 msec to avoid missing an interrupt
+                 * after the sink has cleared it (after a 110msec timeout
+                 * since it raised the interrupt).
+                 */
+                ret = wait_event_timeout(mgr->tx_waitq,
+                                         check_txmsg_state(mgr, txmsg),
+                                         mgr->cbs->poll_hpd_irq ?
+                                                msecs_to_jiffies(50) :
+                                                wait_timeout);
+
+                if (ret || !mgr->cbs->poll_hpd_irq ||
+                    time_after(jiffies, wait_expires))
+                        break;
+
+                mgr->cbs->poll_hpd_irq(mgr);
+        }
+
+        mutex_lock(&mgr->qlock);
         if (ret > 0) {
                 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
                         ret = -EIO;
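
A driver opts in to the polling fallback above by providing the poll_hpd_irq hook. Everything below is a hypothetical sketch — the struct and function names are illustrative, not code from any real driver:

        /* Re-run the same ESI handling an HPD short pulse would trigger. */
        static void my_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
        {
                struct my_dp *dp = container_of(mgr, struct my_dp, mst_mgr);

                my_dp_check_mst_status(dp);
        }

        static const struct drm_dp_mst_topology_cbs my_mst_cbs = {
                .poll_hpd_irq = my_mst_poll_hpd_irq,
        };
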
@@ -820,18 +1341,19 @@
 
         /* remove from q */
         if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
-            txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+            txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+            txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
                 list_del(&txmsg->next);
-        }
-
-        if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
-            txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
-                mstb->tx_slots[txmsg->seqno] = NULL;
-        }
         }
 out:
+        if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
+                struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+
+                drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
+        }
         mutex_unlock(&mgr->qlock);
 
+        drm_dp_mst_kick_tx(mgr);
         return ret;
 }
 
@@ -847,142 +1369,606 @@
         if (lct > 1)
                 memcpy(mstb->rad, rad, lct / 2);
         INIT_LIST_HEAD(&mstb->ports);
-        kref_init(&mstb->kref);
+        kref_init(&mstb->topology_kref);
+        kref_init(&mstb->malloc_kref);
         return mstb;
 }
 
-static void drm_dp_free_mst_port(struct kref *kref);
-
 static void drm_dp_free_mst_branch_device(struct kref *kref)
 {
-        struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
-        if (mstb->port_parent) {
-                if (list_empty(&mstb->port_parent->next))
-                        kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
-        }
+        struct drm_dp_mst_branch *mstb =
+                container_of(kref, struct drm_dp_mst_branch, malloc_kref);
+
+        if (mstb->port_parent)
+                drm_dp_mst_put_port_malloc(mstb->port_parent);
+
         kfree(mstb);
 }
 
+/**
+ * DOC: Branch device and port refcounting
+ *
+ * Topology refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The refcounting schemes for &struct drm_dp_mst_branch and &struct
+ * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
+ * two different kinds of refcounts: topology refcounts, and malloc refcounts.
+ *
+ * Topology refcounts are not exposed to drivers, and are handled internally
+ * by the DP MST helpers. The helpers use them in order to prevent the
+ * in-memory topology state from being changed in the middle of critical
+ * operations like changing the internal state of payload allocations. This
+ * means each branch and port will be considered to be connected to the rest
+ * of the topology until its topology refcount reaches zero. Additionally,
+ * for ports this means that their associated &struct drm_connector will stay
+ * registered with userspace until the port's refcount reaches 0.
+ *
+ * Malloc refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
+ * drm_dp_mst_branch allocated even after all of its topology references have
+ * been dropped, so that the driver or MST helpers can safely access each
+ * branch's last known state before it was disconnected from the topology.
+ * When the malloc refcount of a port or branch reaches 0, the memory
+ * allocation containing the &struct drm_dp_mst_branch or &struct
+ * drm_dp_mst_port respectively will be freed.
+ *
+ * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
+ * to drivers. As of writing this documentation, there are no drivers that
+ * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
+ * helpers. Exposing this API to drivers in a race-free manner would take more
+ * tweaking of the refcounting scheme, however patches are welcome provided
+ * there is a legitimate driver usecase for this.
+ *
+ * Refcount relationships in a topology
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Let's take a look at why the relationship between topology and malloc
+ * refcounts is designed the way it is.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-1.dot
+ *
+ *    An example of topology and malloc refs in a DP MST topology with two
+ *    active payloads. Topology refcount increments are indicated by solid
+ *    lines, and malloc refcount increments are indicated by dashed lines.
+ *    Each starts from the branch which incremented the refcount, and ends at
+ *    the branch to which the refcount belongs to, i.e. the arrow points the
+ *    same way as the C pointers used to reference a structure.
+ *
+ * As you can see in the above figure, every branch increments the topology
+ * refcount of its children, and increments the malloc refcount of its
+ * parent. Additionally, every payload increments the malloc refcount of its
+ * assigned port by 1.
+ *
+ * So, what would happen if MSTB #3 from the above figure was unplugged from
+ * the system, but the driver hadn't yet removed payload #2 from port #3? The
+ * topology would start to look like the figure below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-2.dot
+ *
+ *    Ports and branch devices which have been released from memory are
+ *    colored grey, and references which have been removed are colored red.
+ *
+ * Whenever a port or branch device's topology refcount reaches zero, it will
+ * decrement the topology refcounts of all its children, the malloc refcount
+ * of its parent, and finally its own malloc refcount. For MSTB #4 and port
+ * #4, this means they both have been disconnected from the topology and freed
+ * from memory. But, because payload #2 is still holding a reference to port
+ * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
+ * is still accessible from memory. This also means port #3 has not yet
+ * decremented the malloc refcount of MSTB #3, so its &struct
+ * drm_dp_mst_branch will also stay allocated in memory until port #3's
+ * malloc refcount reaches 0.
+ *
+ * This relationship is necessary because in order to release payload #2, we
+ * need to be able to figure out the last relative of port #3 that's still
+ * connected to the topology. In this case, we would travel up the topology as
+ * shown below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-3.dot
+ *
+ * And finally, remove payload #2 by communicating with port #2 through
+ * sideband transactions.
+ */
+
+/**
+ * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_put_mstb_malloc()
+ */
+static void
+drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+        kref_get(&mstb->malloc_kref);
+        DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
+}
+
+/**
+ * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_get_mstb_malloc()
+ */
+static void
+drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+        DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
+        kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
+}
+
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+        struct drm_dp_mst_port *port =
+                container_of(kref, struct drm_dp_mst_port, malloc_kref);
+
+        drm_dp_mst_put_mstb_malloc(port->parent);
+        kfree(port);
+}
+
+/**
+ * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * Because @port could potentially be freed at any time by the DP MST helpers
+ * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
+ * function, drivers that wish to make use of &struct drm_dp_mst_port should
+ * ensure that they grab at least one main malloc reference to their MST ports
+ * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
+ * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
+ *
+ * See also: drm_dp_mst_put_port_malloc()
+ */
+void
+drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
+{
+        kref_get(&port->malloc_kref);
+        DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
+}
+EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
+
+/**
+ * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * See also: drm_dp_mst_get_port_malloc()
+ */
+void
+drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
+{
+        DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
+        kref_put(&port->malloc_kref, drm_dp_free_mst_port);
+}
+EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
+
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+
+#define STACK_DEPTH 8
+
+static noinline void
+__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
+                    struct drm_dp_mst_topology_ref_history *history,
+                    enum drm_dp_mst_topology_ref_type type)
+{
+        struct drm_dp_mst_topology_ref_entry *entry = NULL;
+        depot_stack_handle_t backtrace;
+        ulong stack_entries[STACK_DEPTH];
+        uint n;
+        int i;
+
+        n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
+        backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
+        if (!backtrace)
+                return;
+
+        /* Try to find an existing entry for this backtrace */
+        for (i = 0; i < history->len; i++) {
+                if (history->entries[i].backtrace == backtrace) {
+                        entry = &history->entries[i];
+                        break;
+                }
+        }
+
+        /* Otherwise add one */
+        if (!entry) {
+                struct drm_dp_mst_topology_ref_entry *new;
+                int new_len = history->len + 1;
+
+                new = krealloc(history->entries, sizeof(*new) * new_len,
+                               GFP_KERNEL);
+                if (!new)
+                        return;
+
+                entry = &new[history->len];
+                history->len = new_len;
+                history->entries = new;
+
+                entry->backtrace = backtrace;
+                entry->type = type;
+                entry->count = 0;
+        }
+        entry->count++;
+        entry->ts_nsec = ktime_get_ns();
+}
+
+static int
+topology_ref_history_cmp(const void *a, const void *b)
+{
+        const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
+
+        if (entry_a->ts_nsec > entry_b->ts_nsec)
+                return 1;
+        else if (entry_a->ts_nsec < entry_b->ts_nsec)
+                return -1;
+        else
+                return 0;
+}
+
+static inline const char *
+topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
+{
+        if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
+                return "get";
+        else
+                return "put";
+}
+
+static void
+__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
+                            void *ptr, const char *type_str)
+{
+        struct drm_printer p = drm_debug_printer(DBG_PREFIX);
+        char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+        int i;
+
+        if (!buf)
+                return;
+
+        if (!history->len)
+                goto out;
+
+        /* First, sort the list so that it goes from oldest to newest
+         * reference entry
+         */
+        sort(history->entries, history->len, sizeof(*history->entries),
+             topology_ref_history_cmp, NULL);
+
+        drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
+                   type_str, ptr);
+
+        for (i = 0; i < history->len; i++) {
+                const struct drm_dp_mst_topology_ref_entry *entry =
+                        &history->entries[i];
+                ulong *entries;
+                uint nr_entries;
+                u64 ts_nsec = entry->ts_nsec;
+                u32 rem_nsec = do_div(ts_nsec, 1000000000);
+
+                nr_entries = stack_depot_fetch(entry->backtrace, &entries);
+                stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
+
+                drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
+                           entry->count,
+                           topology_ref_type_to_str(entry->type),
+                           ts_nsec, rem_nsec / 1000, buf);
+        }
+
+        /* Now free the history, since this is the only time we expose it */
+        kfree(history->entries);
+out:
+        kfree(buf);
+}
+
+static __always_inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
+{
+        __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
+                                    "MSTB");
+}
+
+static __always_inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
+{
+        __dump_topology_ref_history(&port->topology_ref_history, port,
+                                    "Port");
+}
+
+static __always_inline void
+save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
+                       enum drm_dp_mst_topology_ref_type type)
+{
+        __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
+}
+
+static __always_inline void
+save_port_topology_ref(struct drm_dp_mst_port *port,
+                       enum drm_dp_mst_topology_ref_type type)
+{
+        __topology_ref_save(port->mgr, &port->topology_ref_history, type);
+}
+
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
+{
+        mutex_lock(&mgr->topology_ref_history_lock);
+}
+
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+        mutex_unlock(&mgr->topology_ref_history_lock);
+}
+#else
+static inline void
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
+static inline void
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
+static inline void
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
+#define save_mstb_topology_ref(mstb, type)
+#define save_port_topology_ref(port, type)
+#endif
+
 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 {
-        struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
-        struct drm_dp_mst_port *port, *tmp;
-        bool wake_tx = false;
+        struct drm_dp_mst_branch *mstb =
+                container_of(kref, struct drm_dp_mst_branch, topology_kref);
+        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+
+        drm_dp_mst_dump_mstb_topology_history(mstb);
+
+        INIT_LIST_HEAD(&mstb->destroy_next);
 
         /*
-         * init kref again to be used by ports to remove mst branch when it is
-         * not needed anymore
+         * This can get called under mgr->mutex, so we need to perform the
+         * actual destruction of the mstb in another worker
          */
-        kref_init(kref);
-
-        if (mstb->port_parent && list_empty(&mstb->port_parent->next))
-                kref_get(&mstb->port_parent->kref);
-
-        /*
-         * destroy all ports - don't need lock
-         * as there are no more references to the mst branch
-         * device at this point.
-         */
-        list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
-                list_del(&port->next);
-                drm_dp_put_port(port);
-        }
-
-        /* drop any tx slots msg */
-        mutex_lock(&mstb->mgr->qlock);
-        if (mstb->tx_slots[0]) {
-                mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
-                mstb->tx_slots[0] = NULL;
-                wake_tx = true;
-        }
-        if (mstb->tx_slots[1]) {
-                mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
-                mstb->tx_slots[1] = NULL;
-                wake_tx = true;
-        }
-        mutex_unlock(&mstb->mgr->qlock);
-
-        if (wake_tx)
-                wake_up_all(&mstb->mgr->tx_waitq);
-
-        kref_put(kref, drm_dp_free_mst_branch_device);
+        mutex_lock(&mgr->delayed_destroy_lock);
+        list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
+        mutex_unlock(&mgr->delayed_destroy_lock);
+        queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
 }
 
-static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+/**
+ * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
+ * branch device unless it's zero
+ * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @mstb, if it hasn't yet been
+ * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
+ * reached 0). Holding a topology reference implies that a malloc reference
+ * will be held to @mstb as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @mstb. If you already have a topology reference to @mstb, you
+ * should use drm_dp_mst_topology_get_mstb() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @port is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
 {
-        kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
+        int ret;
+
+        topology_ref_history_lock(mstb->mgr);
+        ret = kref_get_unless_zero(&mstb->topology_kref);
+        if (ret) {
+                DRM_DEBUG("mstb %p (%d)\n",
+                          mstb, kref_read(&mstb->topology_kref));
+                save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
+        }
+
+        topology_ref_history_unlock(mstb->mgr);
+
+        return ret;
 }
 
-
-static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
+/**
+ * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
+ * branch device
+ * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ */
+static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
 {
-        struct drm_dp_mst_branch *mstb;
+        topology_ref_history_lock(mstb->mgr);
 
-        switch (old_pdt) {
-        case DP_PEER_DEVICE_DP_LEGACY_CONV:
-        case DP_PEER_DEVICE_SST_SINK:
-                /* remove i2c over sideband */
-                drm_dp_mst_unregister_i2c_bus(&port->aux);
-                break;
-        case DP_PEER_DEVICE_MST_BRANCHING:
-                mstb = port->mstb;
-                port->mstb = NULL;
-                drm_dp_put_mst_branch_device(mstb);
-                break;
-        }
+        save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
+        WARN_ON(kref_read(&mstb->topology_kref) == 0);
+        kref_get(&mstb->topology_kref);
+        DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+
+        topology_ref_history_unlock(mstb->mgr);
+}
+
+/**
+ * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
+ *
+ * Releases a topology reference from @mstb by decrementing
+ * &drm_dp_mst_branch.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_get_mstb()
+ */
+static void
+drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
+{
+        topology_ref_history_lock(mstb->mgr);
+
+        DRM_DEBUG("mstb %p (%d)\n",
+                  mstb, kref_read(&mstb->topology_kref) - 1);
+        save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
+
+        topology_ref_history_unlock(mstb->mgr);
+        kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
 }
 
 static void drm_dp_destroy_port(struct kref *kref)
 {
-        struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+        struct drm_dp_mst_port *port =
+                container_of(kref, struct drm_dp_mst_port, topology_kref);
         struct drm_dp_mst_topology_mgr *mgr = port->mgr;
 
-        if (!port->input) {
-                port->vcpi.num_slots = 0;
+        drm_dp_mst_dump_port_topology_history(port);
 
-                kfree(port->cached_edid);
-
-                /*
-                 * The only time we don't have a connector
-                 * on an output port is if the connector init
-                 * fails.
-                 */
-                if (port->connector) {
-                        /* we can't destroy the connector here, as
-                         * we might be holding the mode_config.mutex
-                         * from an EDID retrieval */
-
-                        mutex_lock(&mgr->destroy_connector_lock);
-                        kref_get(&port->parent->kref);
-                        list_add(&port->next, &mgr->destroy_connector_list);
-                        mutex_unlock(&mgr->destroy_connector_lock);
-                        schedule_work(&mgr->destroy_connector_work);
-                        return;
-                }
-                /* no need to clean up vcpi
-                 * as if we have no connector we never setup a vcpi */
-                drm_dp_port_teardown_pdt(port, port->pdt);
-                port->pdt = DP_PEER_DEVICE_NONE;
+        /* There's nothing that needs locking to destroy an input port yet */
+        if (port->input) {
+                drm_dp_mst_put_port_malloc(port);
+                return;
         }
-        kfree(port);
+
+        kfree(port->cached_edid);
+
+        /*
+         * we can't destroy the connector here, as we might be holding the
+         * mode_config.mutex from an EDID retrieval
+         */
+        mutex_lock(&mgr->delayed_destroy_lock);
+        list_add(&port->next, &mgr->destroy_port_list);
+        mutex_unlock(&mgr->delayed_destroy_lock);
+        queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
 }
 
-static void drm_dp_put_port(struct drm_dp_mst_port *port)
+/**
+ * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
+ * port unless it's zero
+ * @port: &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @port, if it hasn't yet been
+ * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
+ * 0). Holding a topology reference implies that a malloc reference will be
+ * held to @port as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @port. If you already have a topology reference to @port, you
+ * should use drm_dp_mst_topology_get_port() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_port()
+ * drm_dp_mst_topology_put_port()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @port is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
 {
-        kref_put(&port->kref, drm_dp_destroy_port);
+        int ret;
+
+        topology_ref_history_lock(port->mgr);
+        ret = kref_get_unless_zero(&port->topology_kref);
+        if (ret) {
+                DRM_DEBUG("port %p (%d)\n",
+                          port, kref_read(&port->topology_kref));
+                save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+        }
+
+        topology_ref_history_unlock(port->mgr);
+        return ret;
 }
 
-static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+/**
+ * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
+ * @port: The &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_port.topology_refcount without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_put_port()
+ */
+static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
+{
+        topology_ref_history_lock(port->mgr);
+
+        WARN_ON(kref_read(&port->topology_kref) == 0);
+        kref_get(&port->topology_kref);
+        DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+        save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
+
+        topology_ref_history_unlock(port->mgr);
+}
+
+/**
+ * drm_dp_mst_topology_put_port() - release a topology reference to a port
+ * @port: The &struct drm_dp_mst_port to release the topology reference from
+ *
+ * Releases a topology reference from @port by decrementing
+ * &drm_dp_mst_port.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_get_port()
+ */
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
+{
+        topology_ref_history_lock(port->mgr);
+
+        DRM_DEBUG("port %p (%d)\n",
+                  port, kref_read(&port->topology_kref) - 1);
+        save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
+
+        topology_ref_history_unlock(port->mgr);
+        kref_put(&port->topology_kref, drm_dp_destroy_port);
+}
+
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
+                                              struct drm_dp_mst_branch *to_find)
 {
         struct drm_dp_mst_port *port;
         struct drm_dp_mst_branch *rmstb;
-        if (to_find == mstb) {
-                kref_get(&mstb->kref);
+
+        if (to_find == mstb)
                 return mstb;
-        }
+
         list_for_each_entry(port, &mstb->ports, next) {
                 if (port->mstb) {
-                        rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+                        rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+                            port->mstb, to_find);
                         if (rmstb)
                                 return rmstb;
                 }
....@@ -990,27 +1976,37 @@
9901976 return NULL;
9911977 }
9921978
993
-static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
1979
+static struct drm_dp_mst_branch *
1980
+drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1981
+ struct drm_dp_mst_branch *mstb)
9941982 {
9951983 struct drm_dp_mst_branch *rmstb = NULL;
1984
+
9961985 mutex_lock(&mgr->lock);
997
- if (mgr->mst_primary)
998
- rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
1986
+ if (mgr->mst_primary) {
1987
+ rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1988
+ mgr->mst_primary, mstb);
1989
+
1990
+ if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1991
+ rmstb = NULL;
1992
+ }
9991993 mutex_unlock(&mgr->lock);
10001994 return rmstb;
10011995 }
10021996
1003
-static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
1997
+static struct drm_dp_mst_port *
1998
+drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1999
+ struct drm_dp_mst_port *to_find)
10042000 {
10052001 struct drm_dp_mst_port *port, *mport;
10062002
10072003 list_for_each_entry(port, &mstb->ports, next) {
1008
- if (port == to_find) {
1009
- kref_get(&port->kref);
2004
+ if (port == to_find)
10102005 return port;
1011
- }
2006
+
10122007 if (port->mstb) {
1013
- mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
2008
+ mport = drm_dp_mst_topology_get_port_validated_locked(
2009
+ port->mstb, to_find);
10142010 if (mport)
10152011 return mport;
10162012 }
....@@ -1018,12 +2014,20 @@
10182014 return NULL;
10192015 }
10202016
1021
-static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2017
+static struct drm_dp_mst_port *
2018
+drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
2019
+ struct drm_dp_mst_port *port)
10222020 {
10232021 struct drm_dp_mst_port *rport = NULL;
2022
+
10242023 mutex_lock(&mgr->lock);
1025
- if (mgr->mst_primary)
1026
- rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
2024
+ if (mgr->mst_primary) {
2025
+ rport = drm_dp_mst_topology_get_port_validated_locked(
2026
+ mgr->mst_primary, port);
2027
+
2028
+ if (rport && !drm_dp_mst_topology_try_get_port(rport))
2029
+ rport = NULL;
2030
+ }
10272031 mutex_unlock(&mgr->lock);
10282032 return rport;
10292033 }
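The validated getters above replace the old kref_get()-based lookup with one that can fail. A sketch of the expected usage, assuming the caller starts from a possibly-stale pointer (the same shape drm_dp_send_power_updown_phy() uses further down):

	port = drm_dp_mst_topology_get_port_validated(mgr, port);
	if (!port)
		return -EINVAL;	/* port already left the topology */

	/* ... use port; it now holds a topology reference ... */

	drm_dp_mst_topology_put_port(port);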
....@@ -1031,11 +2035,12 @@
10312035 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
10322036 {
10332037 struct drm_dp_mst_port *port;
2038
+ int ret;
10342039
10352040 list_for_each_entry(port, &mstb->ports, next) {
10362041 if (port->port_num == port_num) {
1037
- kref_get(&port->kref);
1038
- return port;
2042
+ ret = drm_dp_mst_topology_try_get_port(port);
2043
+ return ret ? port : NULL;
10392044 }
10402045 }
10412046
....@@ -1053,6 +2058,7 @@
10532058 int parent_lct = port->parent->lct;
10542059 int shift = 4;
10552060 int idx = (parent_lct - 1) / 2;
2061
+
10562062 if (parent_lct > 1) {
10572063 memcpy(rad, port->parent->rad, idx + 1);
10582064 shift = (parent_lct % 2) ? 4 : 0;
....@@ -1063,58 +2069,165 @@
10632069 return parent_lct + 1;
10642070 }
10652071
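A worked example of the relative-address (RAD) packing above, with illustrative numbers: for a parent branch at lct = 2 whose rad[0] = 0x10 (reached through port 1) and a new port number 3, idx = (2 - 1) / 2 = 0 and parent_lct is even, so shift = 0 and the new nibble lands in the low half:

	rad[0] = 0x10 | (3 << 0);	/* = 0x13: port 1, then port 3 */
	/* returned lct = parent_lct + 1 = 3 */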
1066
-/*
1067
- * return sends link address for new mstb
1068
- */
1069
-static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
2072
+static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
10702073 {
1071
- int ret;
1072
- u8 rad[6], lct;
1073
- bool send_link = false;
1074
- switch (port->pdt) {
2074
+ switch (pdt) {
10752075 case DP_PEER_DEVICE_DP_LEGACY_CONV:
10762076 case DP_PEER_DEVICE_SST_SINK:
1077
- /* add i2c over sideband */
1078
- ret = drm_dp_mst_register_i2c_bus(&port->aux);
1079
- break;
2077
+ return true;
10802078 case DP_PEER_DEVICE_MST_BRANCHING:
1081
- lct = drm_dp_calculate_rad(port, rad);
2079
+ /* For sst branch device */
2080
+ if (!mcs)
2081
+ return true;
10822082
1083
- port->mstb = drm_dp_add_mst_branch_device(lct, rad);
1084
- if (port->mstb) {
1085
- port->mstb->mgr = port->mgr;
1086
- port->mstb->port_parent = port;
1087
-
1088
- send_link = true;
1089
- }
1090
- break;
2083
+ return false;
10912084 }
1092
- return send_link;
2085
+ return true;
10932086 }
10942087
1095
-static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2088
+static int
2089
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
2090
+ bool new_mcs)
10962091 {
1097
- int ret;
2092
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2093
+ struct drm_dp_mst_branch *mstb;
2094
+ u8 rad[8], lct;
2095
+ int ret = 0;
2096
+
2097
+ if (port->pdt == new_pdt && port->mcs == new_mcs)
2098
+ return 0;
2099
+
2100
+ /* Teardown the old pdt, if there is one */
2101
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
2102
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2103
+ /*
2104
+ * If the new PDT would also have an i2c bus,
2105
+ * don't bother with reregistering it
2106
+ */
2107
+ if (new_pdt != DP_PEER_DEVICE_NONE &&
2108
+ drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
2109
+ port->pdt = new_pdt;
2110
+ port->mcs = new_mcs;
2111
+ return 0;
2112
+ }
2113
+
2114
+ /* remove i2c over sideband */
2115
+ drm_dp_mst_unregister_i2c_bus(port);
2116
+ } else {
2117
+ mutex_lock(&mgr->lock);
2118
+ drm_dp_mst_topology_put_mstb(port->mstb);
2119
+ port->mstb = NULL;
2120
+ mutex_unlock(&mgr->lock);
2121
+ }
2122
+ }
2123
+
2124
+ port->pdt = new_pdt;
2125
+ port->mcs = new_mcs;
2126
+
2127
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
2128
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2129
+ /* add i2c over sideband */
2130
+ ret = drm_dp_mst_register_i2c_bus(port);
2131
+ } else {
2132
+ lct = drm_dp_calculate_rad(port, rad);
2133
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
2134
+ if (!mstb) {
2135
+ ret = -ENOMEM;
2136
+ DRM_ERROR("Failed to create MSTB for port %p",
2137
+ port);
2138
+ goto out;
2139
+ }
2140
+
2141
+ mutex_lock(&mgr->lock);
2142
+ port->mstb = mstb;
2143
+ mstb->mgr = port->mgr;
2144
+ mstb->port_parent = port;
2145
+
2146
+ /*
2147
+ * Make sure this port's memory allocation stays
2148
+ * around until its child MSTB releases it
2149
+ */
2150
+ drm_dp_mst_get_port_malloc(port);
2151
+ mutex_unlock(&mgr->lock);
2152
+
2153
+ /* And make sure we send a link address for this */
2154
+ ret = 1;
2155
+ }
2156
+ }
2157
+
2158
+out:
2159
+ if (ret < 0)
2160
+ port->pdt = DP_PEER_DEVICE_NONE;
2161
+ return ret;
2162
+}
2163
+
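Callers of drm_dp_port_set_pdt() key off a three-way return value; a short sketch of the contract, mirroring how the later hunks in this patch consume it:

	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
	if (ret == 1) {
		/* a new branch device hangs off this port; the caller
		 * must send a LINK_ADDRESS to port->mstb */
	} else if (ret < 0) {
		/* setup failed; pdt was reset to DP_PEER_DEVICE_NONE */
	}
	/* ret == 0: no change was needed */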
2164
+/**
2165
+ * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2166
+ * @aux: Fake sideband AUX CH
2167
+ * @offset: address of the (first) register to read
2168
+ * @buffer: buffer to store the register values
2169
+ * @size: number of bytes in @buffer
2170
+ *
2171
+ * Performs the same functionality for remote devices via
2172
+ * sideband messaging as drm_dp_dpcd_read() does for local
2173
+ * devices via actual AUX CH.
2174
+ *
2175
+ * Return: Number of bytes read, or negative error code on failure.
2176
+ */
2177
+ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2178
+ unsigned int offset, void *buffer, size_t size)
2179
+{
2180
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2181
+ aux);
2182
+
2183
+ return drm_dp_send_dpcd_read(port->mgr, port,
2184
+ offset, size, buffer);
2185
+}
2186
+
2187
+/**
2188
+ * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2189
+ * @aux: Fake sideband AUX CH
2190
+ * @offset: address of the (first) register to write
2191
+ * @buffer: buffer containing the values to write
2192
+ * @size: number of bytes in @buffer
2193
+ *
2194
+ * Performs the same functionality for remote devices via
2195
+ * sideband messaging as drm_dp_dpcd_write() does for local
2196
+ * devices via actual AUX CH.
2197
+ *
2198
+ * Return: number of bytes written on success, negative error code on failure.
2199
+ */
2200
+ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2201
+ unsigned int offset, void *buffer, size_t size)
2202
+{
2203
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2204
+ aux);
2205
+
2206
+ return drm_dp_send_dpcd_write(port->mgr, port,
2207
+ offset, size, buffer);
2208
+}
2209
+
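These two helpers back the remote (is_remote) path of the port's fake AUX channel, so most callers go through the generic DPCD accessors rather than calling them directly. A hedged sketch of that indirection:

	/* Reads the remote sink's DPCD revision; because port->aux.is_remote
	 * is set, drm_dp_dpcd_read() routes this through
	 * drm_dp_mst_dpcd_read() and the sideband channel.
	 */
	u8 rev;

	if (drm_dp_dpcd_read(&port->aux, DP_DPCD_REV, &rev, 1) == 1)
		/* rev holds e.g. 0x12 for DPCD r1.2 */;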
2210
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2211
+{
2212
+ int ret = 0;
10982213
10992214 memcpy(mstb->guid, guid, 16);
11002215
11012216 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
11022217 if (mstb->port_parent) {
1103
- ret = drm_dp_send_dpcd_write(
1104
- mstb->mgr,
1105
- mstb->port_parent,
1106
- DP_GUID,
1107
- 16,
1108
- mstb->guid);
2218
+ ret = drm_dp_send_dpcd_write(mstb->mgr,
2219
+ mstb->port_parent,
2220
+ DP_GUID, 16, mstb->guid);
11092221 } else {
1110
-
1111
- ret = drm_dp_dpcd_write(
1112
- mstb->mgr->aux,
1113
- DP_GUID,
1114
- mstb->guid,
1115
- 16);
2222
+ ret = drm_dp_dpcd_write(mstb->mgr->aux,
2223
+ DP_GUID, mstb->guid, 16);
11162224 }
11172225 }
2226
+
2227
+ if (ret < 16 && ret > 0)
2228
+ return -EPROTO;
2229
+
2230
+ return ret == 16 ? 0 : ret;
11182231 }
11192232
11202233 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
....@@ -1124,10 +2237,12 @@
11242237 {
11252238 int i;
11262239 char temp[8];
2240
+
11272241 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
11282242 for (i = 0; i < (mstb->lct - 1); i++) {
11292243 int shift = (i % 2) ? 0 : 4;
11302244 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2245
+
11312246 snprintf(temp, sizeof(temp), "-%d", port_num);
11322247 strlcat(proppath, temp, proppath_size);
11332248 }
....@@ -1135,35 +2250,177 @@
11352250 strlcat(proppath, temp, proppath_size);
11362251 }
11372252
1138
-static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1139
- struct drm_device *dev,
1140
- struct drm_dp_link_addr_reply_port *port_msg)
2253
+/**
2254
+ * drm_dp_mst_connector_late_register() - Late MST connector registration
2255
+ * @connector: The MST connector
2256
+ * @port: The MST port for this connector
2257
+ *
2258
+ * Helper to register the remote aux device for this MST port. Drivers should
2259
+ * call this from their mst connector's late_register hook to enable MST aux
2260
+ * devices.
2261
+ *
2262
+ * Return: 0 on success, negative error code on failure.
2263
+ */
2264
+int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2265
+ struct drm_dp_mst_port *port)
11412266 {
1142
- struct drm_dp_mst_port *port;
1143
- bool ret;
1144
- bool created = false;
1145
- int old_pdt = 0;
1146
- int old_ddps = 0;
1147
- port = drm_dp_get_port(mstb, port_msg->port_number);
1148
- if (!port) {
1149
- port = kzalloc(sizeof(*port), GFP_KERNEL);
1150
- if (!port)
1151
- return;
1152
- kref_init(&port->kref);
1153
- port->parent = mstb;
1154
- port->port_num = port_msg->port_number;
1155
- port->mgr = mstb->mgr;
1156
- port->aux.name = "DPMST";
1157
- port->aux.dev = dev->dev;
1158
- created = true;
1159
- } else {
1160
- old_pdt = port->pdt;
1161
- old_ddps = port->ddps;
2267
+ DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2268
+ port->aux.name, connector->kdev->kobj.name);
2269
+
2270
+ port->aux.dev = connector->kdev;
2271
+ return drm_dp_aux_register_devnode(&port->aux);
2272
+}
2273
+EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2274
+
2275
+/**
2276
+ * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2277
+ * @connector: The MST connector
2278
+ * @port: The MST port for this connector
2279
+ *
2280
+ * Helper to unregister the remote aux device for this MST port, registered by
2281
+ * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2282
+ * connector's early_unregister hook.
2283
+ */
2284
+void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2285
+ struct drm_dp_mst_port *port)
2286
+{
2287
+ DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2288
+ port->aux.name, connector->kdev->kobj.name);
2289
+ drm_dp_aux_unregister_devnode(&port->aux);
2290
+}
2291
+EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
2292
+
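A sketch of the driver-side glue these two exported helpers expect, hooked into a connector's &drm_connector_funcs; the foo_* names are hypothetical, but real users (e.g. i915's MST connectors) follow the same shape:

	static int foo_mst_connector_late_register(struct drm_connector *connector)
	{
		struct foo_connector *c = to_foo_connector(connector);

		return drm_dp_mst_connector_late_register(connector, c->port);
	}

	static void foo_mst_connector_early_unregister(struct drm_connector *connector)
	{
		struct foo_connector *c = to_foo_connector(connector);

		drm_dp_mst_connector_early_unregister(connector, c->port);
	}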
2293
+static void
2294
+drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2295
+ struct drm_dp_mst_port *port)
2296
+{
2297
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2298
+ char proppath[255];
2299
+ int ret;
2300
+
2301
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2302
+ port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2303
+ if (!port->connector) {
2304
+ ret = -ENOMEM;
2305
+ goto error;
11622306 }
11632307
1164
- port->pdt = port_msg->peer_device_type;
2308
+ if (port->pdt != DP_PEER_DEVICE_NONE &&
2309
+ drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
2310
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
2311
+ port->cached_edid = drm_get_edid(port->connector,
2312
+ &port->aux.ddc);
2313
+ drm_connector_set_tile_property(port->connector);
2314
+ }
2315
+
2316
+ drm_connector_register(port->connector);
2317
+ return;
2318
+
2319
+error:
2320
+ DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2321
+}
2322
+
2323
+/*
2324
+ * Drop a topology reference, and unlink the port from the in-memory topology
2325
+ * layout
2326
+ */
2327
+static void
2328
+drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2329
+ struct drm_dp_mst_port *port)
2330
+{
2331
+ mutex_lock(&mgr->lock);
2332
+ port->parent->num_ports--;
2333
+ list_del(&port->next);
2334
+ mutex_unlock(&mgr->lock);
2335
+ drm_dp_mst_topology_put_port(port);
2336
+}
2337
+
2338
+static struct drm_dp_mst_port *
2339
+drm_dp_mst_add_port(struct drm_device *dev,
2340
+ struct drm_dp_mst_topology_mgr *mgr,
2341
+ struct drm_dp_mst_branch *mstb, u8 port_number)
2342
+{
2343
+ struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2344
+
2345
+ if (!port)
2346
+ return NULL;
2347
+
2348
+ kref_init(&port->topology_kref);
2349
+ kref_init(&port->malloc_kref);
2350
+ port->parent = mstb;
2351
+ port->port_num = port_number;
2352
+ port->mgr = mgr;
2353
+ port->aux.name = "DPMST";
2354
+ port->aux.dev = dev->dev;
2355
+ port->aux.is_remote = true;
2356
+
2357
+ /* initialize the MST downstream port's AUX crc work queue */
2358
+ drm_dp_remote_aux_init(&port->aux);
2359
+
2360
+ /*
2361
+ * Make sure the memory allocation for our parent branch stays
2362
+ * around until our own memory allocation is released
2363
+ */
2364
+ drm_dp_mst_get_mstb_malloc(mstb);
2365
+
2366
+ return port;
2367
+}
2368
+
2369
+static int
2370
+drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2371
+ struct drm_device *dev,
2372
+ struct drm_dp_link_addr_reply_port *port_msg)
2373
+{
2374
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2375
+ struct drm_dp_mst_port *port;
2376
+ int old_ddps = 0, ret;
2377
+ u8 new_pdt = DP_PEER_DEVICE_NONE;
2378
+ bool new_mcs = 0;
2379
+ bool created = false, send_link_addr = false, changed = false;
2380
+
2381
+ port = drm_dp_get_port(mstb, port_msg->port_number);
2382
+ if (!port) {
2383
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
2384
+ port_msg->port_number);
2385
+ if (!port)
2386
+ return -ENOMEM;
2387
+ created = true;
2388
+ changed = true;
2389
+ } else if (!port->input && port_msg->input_port && port->connector) {
2390
+ /* Since port->connector can't be changed here, we create a
2391
+ * new port if input_port changes from 0 to 1
2392
+ */
2393
+ drm_dp_mst_topology_unlink_port(mgr, port);
2394
+ drm_dp_mst_topology_put_port(port);
2395
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
2396
+ port_msg->port_number);
2397
+ if (!port)
2398
+ return -ENOMEM;
2399
+ changed = true;
2400
+ created = true;
2401
+ } else if (port->input && !port_msg->input_port) {
2402
+ changed = true;
2403
+ } else if (port->connector) {
2404
+ /* We're updating a port that's exposed to userspace, so do it
2405
+ * under lock
2406
+ */
2407
+ drm_modeset_lock(&mgr->base.lock, NULL);
2408
+
2409
+ old_ddps = port->ddps;
2410
+ changed = port->ddps != port_msg->ddps ||
2411
+ (port->ddps &&
2412
+ (port->ldps != port_msg->legacy_device_plug_status ||
2413
+ port->dpcd_rev != port_msg->dpcd_revision ||
2414
+ port->mcs != port_msg->mcs ||
2415
+ port->pdt != port_msg->peer_device_type ||
2416
+ port->num_sdp_stream_sinks !=
2417
+ port_msg->num_sdp_stream_sinks));
2418
+ }
2419
+
11652420 port->input = port_msg->input_port;
1166
- port->mcs = port_msg->mcs;
2421
+ if (!port->input)
2422
+ new_pdt = port_msg->peer_device_type;
2423
+ new_mcs = port_msg->mcs;
11672424 port->ddps = port_msg->ddps;
11682425 port->ldps = port_msg->legacy_device_plug_status;
11692426 port->dpcd_rev = port_msg->dpcd_revision;
....@@ -1173,93 +2430,141 @@
11732430 /* manage mstb port lists with mgr lock - take a reference
11742431 for this list */
11752432 if (created) {
1176
- mutex_lock(&mstb->mgr->lock);
1177
- kref_get(&port->kref);
2433
+ mutex_lock(&mgr->lock);
2434
+ drm_dp_mst_topology_get_port(port);
11782435 list_add(&port->next, &mstb->ports);
1179
- mutex_unlock(&mstb->mgr->lock);
2436
+ mstb->num_ports++;
2437
+ mutex_unlock(&mgr->lock);
11802438 }
11812439
1182
- if (old_ddps != port->ddps) {
1183
- if (port->ddps) {
1184
- if (!port->input)
1185
- drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
2440
+ /*
2441
+ * Reprobe PBN caps on both hotplug, and when re-probing the link
2442
+ * for our parent mstb
2443
+ */
2444
+ if (old_ddps != port->ddps || !created) {
2445
+ if (port->ddps && !port->input) {
2446
+ ret = drm_dp_send_enum_path_resources(mgr, mstb,
2447
+ port);
2448
+ if (ret == 1)
2449
+ changed = true;
11862450 } else {
1187
- port->available_pbn = 0;
1188
- }
1189
- }
1190
-
1191
- if (old_pdt != port->pdt && !port->input) {
1192
- drm_dp_port_teardown_pdt(port, old_pdt);
1193
-
1194
- ret = drm_dp_port_setup_pdt(port);
1195
- if (ret == true)
1196
- drm_dp_send_link_address(mstb->mgr, port->mstb);
1197
- }
1198
-
1199
- if (created && !port->input) {
1200
- char proppath[255];
1201
-
1202
- build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
1203
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1204
- if (!port->connector) {
1205
- /* remove it from the port list */
1206
- mutex_lock(&mstb->mgr->lock);
1207
- list_del(&port->next);
1208
- mutex_unlock(&mstb->mgr->lock);
1209
- /* drop port list reference */
1210
- drm_dp_put_port(port);
1211
- goto out;
2451
+ port->full_pbn = 0;
12122452 }
1213
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1214
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
1215
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
1216
- port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1217
- drm_connector_set_tile_property(port->connector);
1218
- }
1219
- (*mstb->mgr->cbs->register_connector)(port->connector);
12202453 }
12212454
1222
-out:
2455
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2456
+ if (ret == 1) {
2457
+ send_link_addr = true;
2458
+ } else if (ret < 0) {
2459
+ DRM_ERROR("Failed to change PDT on port %p: %d\n",
2460
+ port, ret);
2461
+ goto fail;
2462
+ }
2463
+
2464
+ /*
2465
+ * If this port wasn't just created, then we're reprobing because
2466
+ * we're coming out of suspend. In this case, always resend the link
2467
+ * address if there's an MSTB on this port
2468
+ */
2469
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2470
+ port->mcs)
2471
+ send_link_addr = true;
2472
+
2473
+ if (port->connector)
2474
+ drm_modeset_unlock(&mgr->base.lock);
2475
+ else if (!port->input)
2476
+ drm_dp_mst_port_add_connector(mstb, port);
2477
+
2478
+ if (send_link_addr && port->mstb) {
2479
+ ret = drm_dp_send_link_address(mgr, port->mstb);
2480
+ if (ret == 1) /* MSTB below us changed */
2481
+ changed = true;
2482
+ else if (ret < 0)
2483
+ goto fail_put;
2484
+ }
2485
+
12232486 /* put reference to this port */
1224
- drm_dp_put_port(port);
2487
+ drm_dp_mst_topology_put_port(port);
2488
+ return changed;
2489
+
2490
+fail:
2491
+ drm_dp_mst_topology_unlink_port(mgr, port);
2492
+ if (port->connector)
2493
+ drm_modeset_unlock(&mgr->base.lock);
2494
+fail_put:
2495
+ drm_dp_mst_topology_put_port(port);
2496
+ return ret;
12252497 }
12262498
1227
-static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1228
- struct drm_dp_connection_status_notify *conn_stat)
2499
+static void
2500
+drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2501
+ struct drm_dp_connection_status_notify *conn_stat)
12292502 {
2503
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
12302504 struct drm_dp_mst_port *port;
1231
- int old_pdt;
1232
- int old_ddps;
1233
- bool dowork = false;
2505
+ int old_ddps, ret;
2506
+ u8 new_pdt;
2507
+ bool new_mcs;
2508
+ bool dowork = false, create_connector = false;
2509
+
12342510 port = drm_dp_get_port(mstb, conn_stat->port_number);
12352511 if (!port)
12362512 return;
12372513
2514
+ if (port->connector) {
2515
+ if (!port->input && conn_stat->input_port) {
2516
+ /*
2517
+ * We can't remove a connector from an already exposed
2518
+ * port, so just throw the port out and make sure we
2519
+ * reprobe the link address of its parent MSTB
2520
+ */
2521
+ drm_dp_mst_topology_unlink_port(mgr, port);
2522
+ mstb->link_address_sent = false;
2523
+ dowork = true;
2524
+ goto out;
2525
+ }
2526
+
2527
+ /* Locking is only needed if the port's exposed to userspace */
2528
+ drm_modeset_lock(&mgr->base.lock, NULL);
2529
+ } else if (port->input && !conn_stat->input_port) {
2530
+ create_connector = true;
2531
+ /* Reprobe link address so we get num_sdp_streams */
2532
+ mstb->link_address_sent = false;
2533
+ dowork = true;
2534
+ }
2535
+
12382536 old_ddps = port->ddps;
1239
- old_pdt = port->pdt;
1240
- port->pdt = conn_stat->peer_device_type;
1241
- port->mcs = conn_stat->message_capability_status;
2537
+ port->input = conn_stat->input_port;
12422538 port->ldps = conn_stat->legacy_device_plug_status;
12432539 port->ddps = conn_stat->displayport_device_plug_status;
12442540
12452541 if (old_ddps != port->ddps) {
1246
- if (port->ddps) {
1247
- dowork = true;
1248
- } else {
1249
- port->available_pbn = 0;
1250
- }
1251
- }
1252
- if (old_pdt != port->pdt && !port->input) {
1253
- drm_dp_port_teardown_pdt(port, old_pdt);
1254
-
1255
- if (drm_dp_port_setup_pdt(port))
1256
- dowork = true;
2542
+ if (port->ddps && !port->input)
2543
+ drm_dp_send_enum_path_resources(mgr, mstb, port);
2544
+ else
2545
+ port->full_pbn = 0;
12572546 }
12582547
1259
- drm_dp_put_port(port);
2548
+ new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2549
+ new_mcs = conn_stat->message_capability_status;
2550
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2551
+ if (ret == 1) {
2552
+ dowork = true;
2553
+ } else if (ret < 0) {
2554
+ DRM_ERROR("Failed to change PDT for port %p: %d\n",
2555
+ port, ret);
2556
+ dowork = false;
2557
+ }
2558
+
2559
+ if (port->connector)
2560
+ drm_modeset_unlock(&mgr->base.lock);
2561
+ else if (create_connector)
2562
+ drm_dp_mst_port_add_connector(mstb, port);
2563
+
2564
+out:
2565
+ drm_dp_mst_topology_put_port(port);
12602566 if (dowork)
12612567 queue_work(system_long_wq, &mstb->mgr->work);
1262
-
12632568 }
12642569
12652570 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
....@@ -1267,7 +2572,7 @@
12672572 {
12682573 struct drm_dp_mst_branch *mstb;
12692574 struct drm_dp_mst_port *port;
1270
- int i;
2575
+ int i, ret;
12712576 /* find the port by iterating down */
12722577
12732578 mutex_lock(&mgr->lock);
....@@ -1292,7 +2597,9 @@
12922597 }
12932598 }
12942599 }
1295
- kref_get(&mstb->kref);
2600
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
2601
+ if (!ret)
2602
+ mstb = NULL;
12962603 out:
12972604 mutex_unlock(&mgr->lock);
12982605 return mstb;
....@@ -1300,7 +2607,7 @@
13002607
13012608 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
13022609 struct drm_dp_mst_branch *mstb,
1303
- uint8_t *guid)
2610
+ const uint8_t *guid)
13042611 {
13052612 struct drm_dp_mst_branch *found_mstb;
13062613 struct drm_dp_mst_port *port;
....@@ -1322,67 +2629,112 @@
13222629 return NULL;
13232630 }
13242631
1325
-static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
1326
- struct drm_dp_mst_topology_mgr *mgr,
1327
- uint8_t *guid)
2632
+static struct drm_dp_mst_branch *
2633
+drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2634
+ const uint8_t *guid)
13282635 {
13292636 struct drm_dp_mst_branch *mstb;
2637
+ int ret;
13302638
13312639 /* find the port by iterating down */
13322640 mutex_lock(&mgr->lock);
13332641
13342642 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1335
-
1336
- if (mstb)
1337
- kref_get(&mstb->kref);
2643
+ if (mstb) {
2644
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
2645
+ if (!ret)
2646
+ mstb = NULL;
2647
+ }
13382648
13392649 mutex_unlock(&mgr->lock);
13402650 return mstb;
13412651 }
13422652
1343
-static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2653
+static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
13442654 struct drm_dp_mst_branch *mstb)
13452655 {
13462656 struct drm_dp_mst_port *port;
1347
- struct drm_dp_mst_branch *mstb_child;
1348
- if (!mstb->link_address_sent)
1349
- drm_dp_send_link_address(mgr, mstb);
2657
+ int ret;
2658
+ bool changed = false;
2659
+
2660
+ if (!mstb->link_address_sent) {
2661
+ ret = drm_dp_send_link_address(mgr, mstb);
2662
+ if (ret == 1)
2663
+ changed = true;
2664
+ else if (ret < 0)
2665
+ return ret;
2666
+ }
13502667
13512668 list_for_each_entry(port, &mstb->ports, next) {
1352
- if (port->input)
2669
+ struct drm_dp_mst_branch *mstb_child = NULL;
2670
+
2671
+ if (port->input || !port->ddps)
13532672 continue;
13542673
1355
- if (!port->ddps)
1356
- continue;
2674
+ if (port->mstb)
2675
+ mstb_child = drm_dp_mst_topology_get_mstb_validated(
2676
+ mgr, port->mstb);
13572677
1358
- if (!port->available_pbn)
1359
- drm_dp_send_enum_path_resources(mgr, mstb, port);
1360
-
1361
- if (port->mstb) {
1362
- mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
1363
- if (mstb_child) {
1364
- drm_dp_check_and_send_link_address(mgr, mstb_child);
1365
- drm_dp_put_mst_branch_device(mstb_child);
1366
- }
2678
+ if (mstb_child) {
2679
+ ret = drm_dp_check_and_send_link_address(mgr,
2680
+ mstb_child);
2681
+ drm_dp_mst_topology_put_mstb(mstb_child);
2682
+ if (ret == 1)
2683
+ changed = true;
2684
+ else if (ret < 0)
2685
+ return ret;
13672686 }
13682687 }
2688
+
2689
+ return changed;
13692690 }
13702691
13712692 static void drm_dp_mst_link_probe_work(struct work_struct *work)
13722693 {
1373
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
2694
+ struct drm_dp_mst_topology_mgr *mgr =
2695
+ container_of(work, struct drm_dp_mst_topology_mgr, work);
2696
+ struct drm_device *dev = mgr->dev;
13742697 struct drm_dp_mst_branch *mstb;
2698
+ int ret;
2699
+ bool clear_payload_id_table;
2700
+
2701
+ mutex_lock(&mgr->probe_lock);
13752702
13762703 mutex_lock(&mgr->lock);
2704
+ clear_payload_id_table = !mgr->payload_id_table_cleared;
2705
+ mgr->payload_id_table_cleared = true;
2706
+
13772707 mstb = mgr->mst_primary;
13782708 if (mstb) {
1379
- kref_get(&mstb->kref);
2709
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
2710
+ if (!ret)
2711
+ mstb = NULL;
13802712 }
13812713 mutex_unlock(&mgr->lock);
1382
- if (mstb) {
1383
- drm_dp_check_and_send_link_address(mgr, mstb);
1384
- drm_dp_put_mst_branch_device(mstb);
2714
+ if (!mstb) {
2715
+ mutex_unlock(&mgr->probe_lock);
2716
+ return;
13852717 }
2718
+
2719
+ /*
2720
+ * Certain branch devices seem to incorrectly report an available_pbn
2721
+ * of 0 on downstream sinks, even after clearing the
2722
+ * DP_PAYLOAD_ALLOCATE_* registers in
2723
+ * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2724
+ * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2725
+ * things work again.
2726
+ */
2727
+ if (clear_payload_id_table) {
2728
+ DRM_DEBUG_KMS("Clearing payload ID table\n");
2729
+ drm_dp_send_clear_payload_id_table(mgr, mstb);
2730
+ }
2731
+
2732
+ ret = drm_dp_check_and_send_link_address(mgr, mstb);
2733
+ drm_dp_mst_topology_put_mstb(mstb);
2734
+
2735
+ mutex_unlock(&mgr->probe_lock);
2736
+ if (ret)
2737
+ drm_kms_helper_hotplug_event(dev);
13862738 }
13872739
13882740 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
....@@ -1401,7 +2753,8 @@
14012753 return false;
14022754 }
14032755
1404
-static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2756
+static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2757
+ u8 port_num, u32 offset, u8 num_bytes)
14052758 {
14062759 struct drm_dp_sideband_msg_req_body req;
14072760
....@@ -1410,8 +2763,6 @@
14102763 req.u.dpcd_read.dpcd_address = offset;
14112764 req.u.dpcd_read.num_bytes = num_bytes;
14122765 drm_dp_encode_sideband_req(&req, msg);
1413
-
1414
- return 0;
14152766 }
14162767
14172768 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
....@@ -1452,34 +2803,24 @@
14522803 struct drm_dp_mst_branch *mstb = txmsg->dst;
14532804 u8 req_type;
14542805
1455
- /* both msg slots are full */
1456
- if (txmsg->seqno == -1) {
1457
- if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
1458
- DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
1459
- return -EAGAIN;
1460
- }
1461
- if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
1462
- txmsg->seqno = mstb->last_seqno;
1463
- mstb->last_seqno ^= 1;
1464
- } else if (mstb->tx_slots[0] == NULL)
1465
- txmsg->seqno = 0;
1466
- else
1467
- txmsg->seqno = 1;
1468
- mstb->tx_slots[txmsg->seqno] = txmsg;
1469
- }
1470
-
14712806 req_type = txmsg->msg[0] & 0x7f;
14722807 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
1473
- req_type == DP_RESOURCE_STATUS_NOTIFY)
2808
+ req_type == DP_RESOURCE_STATUS_NOTIFY ||
2809
+ req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
14742810 hdr->broadcast = 1;
14752811 else
14762812 hdr->broadcast = 0;
14772813 hdr->path_msg = txmsg->path_msg;
1478
- hdr->lct = mstb->lct;
1479
- hdr->lcr = mstb->lct - 1;
1480
- if (mstb->lct > 1)
1481
- memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
1482
- hdr->seqno = txmsg->seqno;
2814
+ if (hdr->broadcast) {
2815
+ hdr->lct = 1;
2816
+ hdr->lcr = 6;
2817
+ } else {
2818
+ hdr->lct = mstb->lct;
2819
+ hdr->lcr = mstb->lct - 1;
2820
+ }
2821
+
2822
+ memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
2823
+
14832824 return 0;
14842825 }
14852826 /*
....@@ -1494,15 +2835,15 @@
14942835 int len, space, idx, tosend;
14952836 int ret;
14962837
2838
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2839
+ return 0;
2840
+
14972841 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
14982842
1499
- if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
1500
- txmsg->seqno = -1;
2843
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
15012844 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
1502
- }
15032845
1504
- /* make hdr from dst mst - for replies use seqno
1505
- otherwise assign one */
2846
+ /* make hdr from dst mst */
15062847 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
15072848 if (ret < 0)
15082849 return ret;
....@@ -1529,7 +2870,12 @@
15292870
15302871 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
15312872 if (ret) {
1532
- DRM_DEBUG_KMS("sideband msg failed to send\n");
2873
+ if (drm_debug_enabled(DRM_UT_DP)) {
2874
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2875
+
2876
+ drm_printf(&p, "sideband msg failed to send\n");
2877
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2878
+ }
15332879 return ret;
15342880 }
15352881
....@@ -1552,37 +2898,14 @@
15522898 if (list_empty(&mgr->tx_msg_downq))
15532899 return;
15542900
1555
- txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2901
+ txmsg = list_first_entry(&mgr->tx_msg_downq,
2902
+ struct drm_dp_sideband_msg_tx, next);
15562903 ret = process_single_tx_qlock(mgr, txmsg, false);
1557
- if (ret == 1) {
1558
- /* txmsg is sent it should be in the slots now */
1559
- list_del(&txmsg->next);
1560
- } else if (ret) {
2904
+ if (ret < 0) {
15612905 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
15622906 list_del(&txmsg->next);
1563
- if (txmsg->seqno != -1)
1564
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
15652907 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
15662908 wake_up_all(&mgr->tx_waitq);
1567
- }
1568
-}
1569
-
1570
-/* called holding qlock */
1571
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1572
- struct drm_dp_sideband_msg_tx *txmsg)
1573
-{
1574
- int ret;
1575
-
1576
- /* construct a chunk from the first msg in the tx_msg queue */
1577
- ret = process_single_tx_qlock(mgr, txmsg, true);
1578
-
1579
- if (ret != 1)
1580
- DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1581
-
1582
- if (txmsg->seqno != -1) {
1583
- WARN_ON((unsigned int)txmsg->seqno >
1584
- ARRAY_SIZE(txmsg->dst->tx_slots));
1585
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
15862909 }
15872910 }
15882911
....@@ -1591,15 +2914,124 @@
15912914 {
15922915 mutex_lock(&mgr->qlock);
15932916 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2917
+
2918
+ if (drm_debug_enabled(DRM_UT_DP)) {
2919
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2920
+
2921
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2922
+ }
2923
+
15942924 if (list_is_singular(&mgr->tx_msg_downq))
15952925 process_single_down_tx_qlock(mgr);
15962926 mutex_unlock(&mgr->qlock);
15972927 }
15982928
1599
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2929
+static void
2930
+drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2931
+{
2932
+ struct drm_dp_link_addr_reply_port *port_reply;
2933
+ int i;
2934
+
2935
+ for (i = 0; i < reply->nports; i++) {
2936
+ port_reply = &reply->ports[i];
2937
+ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2938
+ i,
2939
+ port_reply->input_port,
2940
+ port_reply->peer_device_type,
2941
+ port_reply->port_number,
2942
+ port_reply->dpcd_revision,
2943
+ port_reply->mcs,
2944
+ port_reply->ddps,
2945
+ port_reply->legacy_device_plug_status,
2946
+ port_reply->num_sdp_streams,
2947
+ port_reply->num_sdp_stream_sinks);
2948
+ }
2949
+}
2950
+
2951
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
16002952 struct drm_dp_mst_branch *mstb)
16012953 {
1602
- int len;
2954
+ struct drm_dp_sideband_msg_tx *txmsg;
2955
+ struct drm_dp_link_address_ack_reply *reply;
2956
+ struct drm_dp_mst_port *port, *tmp;
2957
+ int i, ret, port_mask = 0;
2958
+ bool changed = false;
2959
+
2960
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2961
+ if (!txmsg)
2962
+ return -ENOMEM;
2963
+
2964
+ txmsg->dst = mstb;
2965
+ build_link_address(txmsg);
2966
+
2967
+ mstb->link_address_sent = true;
2968
+ drm_dp_queue_down_tx(mgr, txmsg);
2969
+
2970
+ /* FIXME: Actually do some real error handling here */
2971
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2972
+ if (ret <= 0) {
2973
+ DRM_ERROR("Sending link address failed with %d\n", ret);
2974
+ goto out;
2975
+ }
2976
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2977
+ DRM_ERROR("link address NAK received\n");
2978
+ ret = -EIO;
2979
+ goto out;
2980
+ }
2981
+
2982
+ reply = &txmsg->reply.u.link_addr;
2983
+ DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2984
+ drm_dp_dump_link_address(reply);
2985
+
2986
+ ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2987
+ if (ret) {
2988
+ char buf[64];
2989
+
2990
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2991
+ DRM_ERROR("GUID check on %s failed: %d\n",
2992
+ buf, ret);
2993
+ goto out;
2994
+ }
2995
+
2996
+ for (i = 0; i < reply->nports; i++) {
2997
+ port_mask |= BIT(reply->ports[i].port_number);
2998
+ ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2999
+ &reply->ports[i]);
3000
+ if (ret == 1)
3001
+ changed = true;
3002
+ else if (ret < 0)
3003
+ goto out;
3004
+ }
3005
+
3006
+ /* Prune any ports that are currently a part of mstb in our in-memory
3007
+ * topology, but were not seen in this link address. Usually this
3008
+ * means that they were removed while the topology was out of sync,
3009
+ * e.g. during suspend/resume
3010
+ */
3011
+ mutex_lock(&mgr->lock);
3012
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
3013
+ if (port_mask & BIT(port->port_num))
3014
+ continue;
3015
+
3016
+ DRM_DEBUG_KMS("port %d was not in link address, removing\n",
3017
+ port->port_num);
3018
+ list_del(&port->next);
3019
+ drm_dp_mst_topology_put_port(port);
3020
+ changed = true;
3021
+ }
3022
+ mutex_unlock(&mgr->lock);
3023
+
3024
+out:
3025
+ if (ret <= 0)
3026
+ mstb->link_address_sent = false;
3027
+ kfree(txmsg);
3028
+ return ret < 0 ? ret : changed;
3029
+}
3030
+
3031
+static void
3032
+drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
3033
+ struct drm_dp_mst_branch *mstb)
3034
+{
16033035 struct drm_dp_sideband_msg_tx *txmsg;
16043036 int ret;
16053037
....@@ -1608,52 +3040,23 @@
16083040 return;
16093041
16103042 txmsg->dst = mstb;
1611
- len = build_link_address(txmsg);
3043
+ build_clear_payload_id_table(txmsg);
16123044
1613
- mstb->link_address_sent = true;
16143045 drm_dp_queue_down_tx(mgr, txmsg);
16153046
16163047 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1617
- if (ret > 0) {
1618
- int i;
1619
-
1620
- if (txmsg->reply.reply_type == 1)
1621
- DRM_DEBUG_KMS("link address nak received\n");
1622
- else {
1623
- DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
1624
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1625
- DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
1626
- txmsg->reply.u.link_addr.ports[i].input_port,
1627
- txmsg->reply.u.link_addr.ports[i].peer_device_type,
1628
- txmsg->reply.u.link_addr.ports[i].port_number,
1629
- txmsg->reply.u.link_addr.ports[i].dpcd_revision,
1630
- txmsg->reply.u.link_addr.ports[i].mcs,
1631
- txmsg->reply.u.link_addr.ports[i].ddps,
1632
- txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
1633
- txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1634
- txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
1635
- }
1636
-
1637
- drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
1638
-
1639
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1640
- drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1641
- }
1642
- (*mgr->cbs->hotplug)(mgr);
1643
- }
1644
- } else {
1645
- mstb->link_address_sent = false;
1646
- DRM_DEBUG_KMS("link address failed %d\n", ret);
1647
- }
3048
+ if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3049
+ DRM_DEBUG_KMS("clear payload table id nak received\n");
16483050
16493051 kfree(txmsg);
16503052 }
16513053
1652
-static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1653
- struct drm_dp_mst_branch *mstb,
1654
- struct drm_dp_mst_port *port)
3054
+static int
3055
+drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3056
+ struct drm_dp_mst_branch *mstb,
3057
+ struct drm_dp_mst_port *port)
16553058 {
1656
- int len;
3059
+ struct drm_dp_enum_path_resources_ack_reply *path_res;
16573060 struct drm_dp_sideband_msg_tx *txmsg;
16583061 int ret;
16593062
....@@ -1662,26 +3065,41 @@
16623065 return -ENOMEM;
16633066
16643067 txmsg->dst = mstb;
1665
- len = build_enum_path_resources(txmsg, port->port_num);
3068
+ build_enum_path_resources(txmsg, port->port_num);
16663069
16673070 drm_dp_queue_down_tx(mgr, txmsg);
16683071
16693072 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
16703073 if (ret > 0) {
1671
- if (txmsg->reply.reply_type == 1)
3074
+ ret = 0;
3075
+ path_res = &txmsg->reply.u.path_resources;
3076
+
3077
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
16723078 DRM_DEBUG_KMS("enum path resources nak received\n");
1673
- else {
1674
- if (port->port_num != txmsg->reply.u.path_resources.port_number)
3079
+ } else {
3080
+ if (port->port_num != path_res->port_number)
16753081 DRM_ERROR("got incorrect port in response\n");
1676
- DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
1677
- txmsg->reply.u.path_resources.avail_payload_bw_number);
1678
- port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
1679
- port->fec_capable = txmsg->reply.u.path_resources.fec_capable;
3082
+
3083
+ DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
3084
+ path_res->port_number,
3085
+ path_res->full_payload_bw_number,
3086
+ path_res->avail_payload_bw_number);
3087
+
3088
+ /*
3089
+ * If something changed, make sure we send a
3090
+ * hotplug
3091
+ */
3092
+ if (port->full_pbn != path_res->full_payload_bw_number ||
3093
+ port->fec_capable != path_res->fec_capable)
3094
+ ret = 1;
3095
+
3096
+ port->full_pbn = path_res->full_payload_bw_number;
3097
+ port->fec_capable = path_res->fec_capable;
16803098 }
16813099 }
16823100
16833101 kfree(txmsg);
1684
- return 0;
3102
+ return ret;
16853103 }
16863104
16873105 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
....@@ -1695,22 +3113,40 @@
16953113 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
16963114 }
16973115
1698
-static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
1699
- struct drm_dp_mst_branch *mstb,
1700
- int *port_num)
3116
+/*
3117
+ * Searches upwards in the topology starting from mstb to try to find the
3118
+ * closest available parent of mstb that's still connected to the rest of the
3119
+ * topology. This can be used in order to perform operations like releasing
3120
+ * payloads, where the branch device which owned the payload may no longer be
3121
+ * around and thus would require that the payload on the last living relative
3122
+ * be freed instead.
3123
+ */
3124
+static struct drm_dp_mst_branch *
3125
+drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3126
+ struct drm_dp_mst_branch *mstb,
3127
+ int *port_num)
17013128 {
17023129 struct drm_dp_mst_branch *rmstb = NULL;
17033130 struct drm_dp_mst_port *found_port;
1704
- mutex_lock(&mgr->lock);
1705
- if (mgr->mst_primary) {
1706
- found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
17073131
1708
- if (found_port) {
3132
+ mutex_lock(&mgr->lock);
3133
+ if (!mgr->mst_primary)
3134
+ goto out;
3135
+
3136
+ do {
3137
+ found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3138
+ if (!found_port)
3139
+ break;
3140
+
3141
+ if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
17093142 rmstb = found_port->parent;
1710
- kref_get(&rmstb->kref);
17113143 *port_num = found_port->port_num;
3144
+ } else {
3145
+ /* Search again, starting from this parent */
3146
+ mstb = found_port->parent;
17123147 }
1713
- }
3148
+ } while (!rmstb);
3149
+out:
17143150 mutex_unlock(&mgr->lock);
17153151 return rmstb;
17163152 }
....@@ -1722,23 +3158,19 @@
17223158 {
17233159 struct drm_dp_sideband_msg_tx *txmsg;
17243160 struct drm_dp_mst_branch *mstb;
1725
- int len, ret, port_num;
3161
+ int ret, port_num;
17263162 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
17273163 int i;
17283164
1729
- port = drm_dp_get_validated_port_ref(mgr, port);
1730
- if (!port)
1731
- return -EINVAL;
1732
-
17333165 port_num = port->port_num;
1734
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3166
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
17353167 if (!mstb) {
1736
- mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
3168
+ mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3169
+ port->parent,
3170
+ &port_num);
17373171
1738
- if (!mstb) {
1739
- drm_dp_put_port(port);
3172
+ if (!mstb)
17403173 return -EINVAL;
1741
- }
17423174 }
17433175
17443176 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
....@@ -1751,23 +3183,30 @@
17513183 sinks[i] = i;
17523184
17533185 txmsg->dst = mstb;
1754
- len = build_allocate_payload(txmsg, port_num,
1755
- id,
1756
- pbn, port->num_sdp_streams, sinks);
3186
+ build_allocate_payload(txmsg, port_num,
3187
+ id,
3188
+ pbn, port->num_sdp_streams, sinks);
17573189
17583190 drm_dp_queue_down_tx(mgr, txmsg);
17593191
3192
+ /*
3193
+ * FIXME: there is a small chance that between getting the last
3194
+ * connected mstb and sending the payload message, the last connected
3195
+ * mstb could also be removed from the topology. In the future, this
3196
+ * needs to be fixed by restarting the
3197
+ * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3198
+ * timeout if the topology is still connected to the system.
3199
+ */
17603200 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
17613201 if (ret > 0) {
1762
- if (txmsg->reply.reply_type == 1) {
3202
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
17633203 ret = -EINVAL;
1764
- } else
3204
+ else
17653205 ret = 0;
17663206 }
17673207 kfree(txmsg);
17683208 fail_put:
1769
- drm_dp_put_mst_branch_device(mstb);
1770
- drm_dp_put_port(port);
3209
+ drm_dp_mst_topology_put_mstb(mstb);
17713210 return ret;
17723211 }
17733212
....@@ -1775,71 +3214,86 @@
17753214 struct drm_dp_mst_port *port, bool power_up)
17763215 {
17773216 struct drm_dp_sideband_msg_tx *txmsg;
1778
- int len, ret;
3217
+ int ret;
17793218
1780
- port = drm_dp_get_validated_port_ref(mgr, port);
3219
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
17813220 if (!port)
17823221 return -EINVAL;
17833222
17843223 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
17853224 if (!txmsg) {
1786
- drm_dp_put_port(port);
3225
+ drm_dp_mst_topology_put_port(port);
17873226 return -ENOMEM;
17883227 }
17893228
17903229 txmsg->dst = port->parent;
1791
- len = build_power_updown_phy(txmsg, port->port_num, power_up);
3230
+ build_power_updown_phy(txmsg, port->port_num, power_up);
17923231 drm_dp_queue_down_tx(mgr, txmsg);
17933232
17943233 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
17953234 if (ret > 0) {
1796
- if (txmsg->reply.reply_type == 1)
3235
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
17973236 ret = -EINVAL;
17983237 else
17993238 ret = 0;
18003239 }
18013240 kfree(txmsg);
1802
- drm_dp_put_port(port);
3241
+ drm_dp_mst_topology_put_port(port);
18033242
18043243 return ret;
18053244 }
18063245 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
18073246
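A minimal usage sketch for the exported helper above; drivers typically power the remote PHY up before enabling a stream on the port and down again on disable:

	ret = drm_dp_send_power_updown_phy(mgr, port, true /* power up */);
	if (ret < 0)
		/* NAK, timeout, or the port left the topology */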
1808
-int drm_dp_mst_get_dsc_info(struct drm_dp_mst_topology_mgr *mgr,
3247
+int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
18093248 struct drm_dp_mst_port *port,
1810
- struct drm_dp_mst_dsc_info *dsc_info)
3249
+ struct drm_dp_query_stream_enc_status_ack_reply *status)
18113250 {
1812
- if (!dsc_info)
1813
- return -EINVAL;
3251
+ struct drm_dp_sideband_msg_tx *txmsg;
3252
+ u8 nonce[7];
3253
+ int len, ret;
18143254
1815
- port = drm_dp_get_validated_port_ref(mgr, port);
1816
- if (!port)
1817
- return -EINVAL;
3255
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3256
+ if (!txmsg)
3257
+ return -ENOMEM;
18183258
1819
- memcpy(dsc_info, &port->dsc_info, sizeof(struct drm_dp_mst_dsc_info));
1820
- drm_dp_put_port(port);
3259
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
3260
+ if (!port) {
3261
+ ret = -EINVAL;
3262
+ goto out_get_port;
3263
+ }
18213264
1822
- return 0;
3265
+ get_random_bytes(nonce, sizeof(nonce));
3266
+
3267
+ /*
3268
+ * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
3269
+ * transaction at the MST Branch device directly connected to the
3270
+ * Source"
3271
+ */
3272
+ txmsg->dst = mgr->mst_primary;
3273
+
3274
+ len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
3275
+
3276
+ drm_dp_queue_down_tx(mgr, txmsg);
3277
+
3278
+ ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3279
+ if (ret < 0) {
3280
+ goto out;
3281
+ } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3282
+ drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3283
+ ret = -ENXIO;
3284
+ goto out;
3285
+ }
3286
+
3287
+ ret = 0;
3288
+ memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3289
+
3290
+out:
3291
+ drm_dp_mst_topology_put_port(port);
3292
+out_get_port:
3293
+ kfree(txmsg);
3294
+ return ret;
18233295 }
1824
-EXPORT_SYMBOL_GPL(drm_dp_mst_get_dsc_info);
1825
-
1826
-int drm_dp_mst_update_dsc_info(struct drm_dp_mst_topology_mgr *mgr,
1827
- struct drm_dp_mst_port *port,
1828
- struct drm_dp_mst_dsc_info *dsc_info)
1829
-{
1830
- if (!dsc_info)
1831
- return -EINVAL;
1832
-
1833
- port = drm_dp_get_validated_port_ref(mgr, port);
1834
- if (!port)
1835
- return -EINVAL;
1836
-
1837
- memcpy(&port->dsc_info, dsc_info, sizeof(struct drm_dp_mst_dsc_info));
1838
- drm_dp_put_port(port);
1839
-
1840
- return 0;
1841
-}
1842
-EXPORT_SYMBOL_GPL(drm_dp_mst_update_dsc_info);
3296
+EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
18433297
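A hedged sketch of how an HDCP implementation might consume the QSES helper above; the field names are from &struct drm_dp_query_stream_enc_status_ack_reply:

	struct drm_dp_query_stream_enc_status_ack_reply reply;
	int ret;

	ret = drm_dp_send_query_stream_enc_status(mgr, port, &reply);
	if (ret == 0 && reply.auth_completed && reply.encryption_enabled)
		/* the stream on this port is authenticated and encrypted */;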
18443298 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
18453299 int id,
....@@ -1862,6 +3316,7 @@
18623316 struct drm_dp_payload *payload)
18633317 {
18643318 int ret;
3319
+
18653320 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
18663321 if (ret < 0)
18673322 return ret;
....@@ -1875,7 +3330,7 @@
18753330 struct drm_dp_payload *payload)
18763331 {
18773332 DRM_DEBUG_KMS("\n");
1878
- /* its okay for these to fail */
3333
+ /* it's okay for these to fail */
18793334 if (port) {
18803335 drm_dp_payload_send_msg(mgr, port, id, 0);
18813336 }
....@@ -1908,72 +3363,114 @@
19083363 */
19093364 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
19103365 {
1911
- int i, j;
1912
- int cur_slots = 1;
19133366 struct drm_dp_payload req_payload;
19143367 struct drm_dp_mst_port *port;
3368
+ int i, j;
3369
+ int cur_slots = 1;
3370
+ bool skip;
19153371
19163372 mutex_lock(&mgr->payload_lock);
19173373 for (i = 0; i < mgr->max_payloads; i++) {
3374
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3375
+ struct drm_dp_payload *payload = &mgr->payloads[i];
3376
+ bool put_port = false;
3377
+
19183378 /* solve the current payloads - compare to the hw ones
19193379 - update the hw view */
19203380 req_payload.start_slot = cur_slots;
1921
- if (mgr->proposed_vcpis[i]) {
1922
- port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1923
- port = drm_dp_get_validated_port_ref(mgr, port);
1924
- if (!port) {
1925
- mutex_unlock(&mgr->payload_lock);
1926
- return -EINVAL;
3381
+ if (vcpi) {
3382
+ port = container_of(vcpi, struct drm_dp_mst_port,
3383
+ vcpi);
3384
+
3385
+ mutex_lock(&mgr->lock);
3386
+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
3387
+ mutex_unlock(&mgr->lock);
3388
+
3389
+ if (skip) {
3390
+ drm_dbg_kms(mgr->dev,
3391
+ "Virtual channel %d is not in current topology\n",
3392
+ i);
3393
+ continue;
19273394 }
1928
- req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1929
- req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
3395
+ /* Validated ports don't matter if we're releasing
3396
+ * VCPI
3397
+ */
3398
+ if (vcpi->num_slots) {
3399
+ port = drm_dp_mst_topology_get_port_validated(
3400
+ mgr, port);
3401
+ if (!port) {
3402
+ if (vcpi->num_slots == payload->num_slots) {
3403
+ cur_slots += vcpi->num_slots;
3404
+ payload->start_slot = req_payload.start_slot;
3405
+ continue;
3406
+ } else {
3407
+ drm_dbg_kms(mgr->dev,
3408
+ "Fail:set payload to invalid sink");
3409
+ mutex_unlock(&mgr->payload_lock);
3410
+ return -EINVAL;
3411
+ }
3412
+ }
3413
+ put_port = true;
3414
+ }
3415
+
3416
+ req_payload.num_slots = vcpi->num_slots;
3417
+ req_payload.vcpi = vcpi->vcpi;
19303418 } else {
19313419 port = NULL;
19323420 req_payload.num_slots = 0;
19333421 }
19343422
1935
- if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1936
- mgr->payloads[i].start_slot = req_payload.start_slot;
1937
- }
3423
+ payload->start_slot = req_payload.start_slot;
19383424 /* work out what is required to happen with this payload */
1939
- if (mgr->payloads[i].num_slots != req_payload.num_slots) {
3425
+ if (payload->num_slots != req_payload.num_slots) {
19403426
19413427 /* need to push an update for this payload */
19423428 if (req_payload.num_slots) {
1943
- drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
1944
- mgr->payloads[i].num_slots = req_payload.num_slots;
1945
- mgr->payloads[i].vcpi = req_payload.vcpi;
1946
- } else if (mgr->payloads[i].num_slots) {
1947
- mgr->payloads[i].num_slots = 0;
1948
- drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
1949
- req_payload.payload_state = mgr->payloads[i].payload_state;
1950
- mgr->payloads[i].start_slot = 0;
3429
+ drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3430
+ &req_payload);
3431
+ payload->num_slots = req_payload.num_slots;
3432
+ payload->vcpi = req_payload.vcpi;
3433
+
3434
+ } else if (payload->num_slots) {
3435
+ payload->num_slots = 0;
3436
+ drm_dp_destroy_payload_step1(mgr, port,
3437
+ payload->vcpi,
3438
+ payload);
3439
+ req_payload.payload_state =
3440
+ payload->payload_state;
3441
+ payload->start_slot = 0;
19513442 }
1952
- mgr->payloads[i].payload_state = req_payload.payload_state;
3443
+ payload->payload_state = req_payload.payload_state;
19533444 }
19543445 cur_slots += req_payload.num_slots;
19553446
1956
- if (port)
1957
- drm_dp_put_port(port);
3447
+ if (put_port)
3448
+ drm_dp_mst_topology_put_port(port);
19583449 }
19593450
1960
- for (i = 0; i < mgr->max_payloads; i++) {
1961
- if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1962
- DRM_DEBUG_KMS("removing payload %d\n", i);
1963
- for (j = i; j < mgr->max_payloads - 1; j++) {
1964
- memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1965
- mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1966
- if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1967
- set_bit(j + 1, &mgr->payload_mask);
1968
- } else {
1969
- clear_bit(j + 1, &mgr->payload_mask);
1970
- }
1971
- }
1972
- memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1973
- mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1974
- clear_bit(mgr->max_payloads, &mgr->payload_mask);
1975
-
3451
+ for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3452
+ if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3453
+ i++;
3454
+ continue;
19763455 }
3456
+
3457
+ DRM_DEBUG_KMS("removing payload %d\n", i);
3458
+ for (j = i; j < mgr->max_payloads - 1; j++) {
3459
+ mgr->payloads[j] = mgr->payloads[j + 1];
3460
+ mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3461
+
3462
+ if (mgr->proposed_vcpis[j] &&
3463
+ mgr->proposed_vcpis[j]->num_slots) {
3464
+ set_bit(j + 1, &mgr->payload_mask);
3465
+ } else {
3466
+ clear_bit(j + 1, &mgr->payload_mask);
3467
+ }
3468
+ }
3469
+
3470
+ memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3471
+ sizeof(struct drm_dp_payload));
3472
+ mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3473
+ clear_bit(mgr->max_payloads, &mgr->payload_mask);
19773474 }
19783475 mutex_unlock(&mgr->payload_lock);
19793476
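A worked example of the compaction loop above: with max_payloads = 4 and payload 1 in DP_PAYLOAD_DELETE_LOCAL,

	before: payloads = [P0, P1 (delete), P2, P3]
	after:  payloads = [P0, P2, P3, 0]

with proposed_vcpis shifted identically and the payload_mask bits fixed up to match. Note the loop only advances i when nothing was removed, so consecutive DP_PAYLOAD_DELETE_LOCAL entries are all reaped.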
@@ -1995,6 +3492,8 @@
 	struct drm_dp_mst_port *port;
 	int i;
 	int ret = 0;
+	bool skip;
+
 	mutex_lock(&mgr->payload_lock);
 	for (i = 0; i < mgr->max_payloads; i++) {
 
@@ -2002,6 +3501,13 @@
 			continue;
 
 		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+
+		mutex_lock(&mgr->lock);
+		skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
+		mutex_unlock(&mgr->lock);
+
+		if (skip)
+			continue;
 
 		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
@@ -2019,36 +3525,15 @@
 }
 EXPORT_SYMBOL(drm_dp_update_payload_part2);
 
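For orientation, the two payload-update helpers above are meant to bracket the driver's own stream enable: part 1 pushes the new payload table to the hub over DPCD, and part 2 sends the ALLOCATE_PAYLOAD sideband messages down the topology. A rough sketch of the expected call order in a driver's enable path, assuming a port whose VCPI was already set up with drm_dp_mst_allocate_vcpi():

	drm_dp_update_payload_part1(mgr);	/* write the DPCD payload table */
	/* ... driver enables the transcoder/stream for this VC here ... */
	drm_dp_check_act_status(mgr);		/* poll until the hub applies the table */
	drm_dp_update_payload_part2(mgr);	/* send ALLOCATE_PAYLOAD to the sinks */
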
-int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
-			  struct drm_dp_mst_port *port,
-			  int offset, int size, u8 *bytes)
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_port *port,
+				 int offset, int size, u8 *bytes)
 {
-	int len;
-	struct drm_dp_sideband_msg_tx *txmsg;
-
-	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
-	if (!txmsg)
-		return -ENOMEM;
-
-	len = build_dpcd_read(txmsg, port->port_num, offset, size);
-	txmsg->dst = port->parent;
-
-	drm_dp_queue_down_tx(mgr, txmsg);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(drm_dp_send_dpcd_read);
-
-int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
-			   struct drm_dp_mst_port *port,
-			   int offset, int size, u8 *bytes)
-{
-	int len;
-	int ret;
+	int ret = 0;
 	struct drm_dp_sideband_msg_tx *txmsg;
 	struct drm_dp_mst_branch *mstb;
 
-	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
 	if (!mstb)
 		return -EINVAL;
 
@@ -2058,45 +3543,82 @@
 		goto fail_put;
 	}
 
-	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+	build_dpcd_read(txmsg, port->port_num, offset, size);
+	txmsg->dst = port->parent;
+
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+	if (ret < 0)
+		goto fail_free;
+
+	/* DPCD read should never be NACKed */
+	if (txmsg->reply.reply_type == 1) {
+		DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
+			  mstb, port->port_num, offset, size);
+		ret = -EIO;
+		goto fail_free;
+	}
+
+	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
+		ret = -EPROTO;
+		goto fail_free;
+	}
+
+	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
+		    size);
+	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
+
+fail_free:
+	kfree(txmsg);
+fail_put:
+	drm_dp_mst_topology_put_mstb(mstb);
+
+	return ret;
+}
+
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+				  struct drm_dp_mst_port *port,
+				  int offset, int size, u8 *bytes)
+{
+	int ret;
+	struct drm_dp_sideband_msg_tx *txmsg;
+	struct drm_dp_mst_branch *mstb;
+
+	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
+	if (!mstb)
+		return -EINVAL;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg) {
+		ret = -ENOMEM;
+		goto fail_put;
+	}
+
+	build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
 	txmsg->dst = mstb;
 
 	drm_dp_queue_down_tx(mgr, txmsg);
 
 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
 	if (ret > 0) {
-		if (txmsg->reply.reply_type == 1) {
-			ret = -EINVAL;
-		} else
-			ret = 0;
+		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+			ret = -EIO;
+		else
+			ret = size;
 	}
+
 	kfree(txmsg);
 fail_put:
-	drm_dp_put_mst_branch_device(mstb);
+	drm_dp_mst_topology_put_mstb(mstb);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(drm_dp_send_dpcd_write);
-
-int drm_dp_mst_get_max_sdp_streams_supported(
-		struct drm_dp_mst_topology_mgr *mgr,
-		struct drm_dp_mst_port *port)
-{
-	int ret = -1;
-
-	port = drm_dp_get_validated_port_ref(mgr, port);
-	if (!port)
-		return ret;
-	ret = port->num_sdp_streams;
-	drm_dp_put_port(port);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(drm_dp_mst_get_max_sdp_streams_supported);
 
 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
 {
 	struct drm_dp_sideband_msg_reply_body reply;
 
-	reply.reply_type = 0;
+	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
 	reply.req_type = req_type;
 	drm_dp_encode_sideband_reply(&reply, msg);
 	return 0;
@@ -2104,7 +3626,7 @@
 
 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
 				    struct drm_dp_mst_branch *mstb,
-				    int req_type, int seqno, bool broadcast)
+				    int req_type, bool broadcast)
 {
 	struct drm_dp_sideband_msg_tx *txmsg;
 
@@ -2113,44 +3635,59 @@
 		return -ENOMEM;
 
 	txmsg->dst = mstb;
-	txmsg->seqno = seqno;
 	drm_dp_encode_up_ack_reply(txmsg, req_type);
 
 	mutex_lock(&mgr->qlock);
-
-	process_single_up_tx_qlock(mgr, txmsg);
-
+	/* construct a chunk from the first msg in the tx_msg queue */
+	process_single_tx_qlock(mgr, txmsg, true);
 	mutex_unlock(&mgr->qlock);
 
 	kfree(txmsg);
 	return 0;
 }
 
-static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
-				     int dp_link_count,
-				     int *out)
+/**
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @link_rate: link rate in 10kbits/s units
+ * @link_lane_count: lane count
+ *
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
+ * convert the number of PBNs required for a given stream to the number of
+ * timeslots this stream requires in each MTP.
+ */
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
 {
-	switch (dp_link_bw) {
-	default:
-		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
-			      dp_link_bw, dp_link_count);
+	if (link_rate == 0 || link_lane_count == 0)
+		DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
+			      link_rate, link_lane_count);
+
+	/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+	return link_rate * link_lane_count / 54000;
+}
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
+
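A worked example of the formula above: an HBR2 link (DPCD bandwidth code DP_LINK_BW_5_4, which drm_dp_bw_code_to_link_rate() converts to 540000 in 10kbit/s units) driving 4 lanes gives 540000 * 4 / 54000 = 40 PBN per timeslot:

	int pbn_div = drm_dp_get_vc_payload_bw(
			drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4), 4);
	/* pbn_div == 40 */
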
+/**
+ * drm_dp_read_mst_cap() - check whether or not a sink supports MST
+ * @aux: The DP AUX channel to use
+ * @dpcd: A cached copy of the DPCD capabilities for this sink
+ *
+ * Returns: %true if the sink supports MST, %false otherwise
+ */
+bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
+			 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	u8 mstm_cap;
+
+	if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
 		return false;
 
-	case DP_LINK_BW_1_62:
-		*out = 3 * dp_link_count;
-		break;
-	case DP_LINK_BW_2_7:
-		*out = 5 * dp_link_count;
-		break;
-	case DP_LINK_BW_5_4:
-		*out = 10 * dp_link_count;
-		break;
-	case DP_LINK_BW_8_1:
-		*out = 15 * dp_link_count;
-		break;
-	}
-	return true;
+	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
+		return false;
+
+	return mstm_cap & DP_MST_CAP;
 }
+EXPORT_SYMBOL(drm_dp_read_mst_cap);
 
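A minimal sketch of how a driver might use this helper when probing a sink, before flipping the manager's MST state (the aux and mgr pointers are assumed to be the driver's own):

	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	bool mst = false;

	if (drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd,
			     DP_RECEIVER_CAP_SIZE) == DP_RECEIVER_CAP_SIZE)
		mst = drm_dp_read_mst_cap(aux, dpcd);

	drm_dp_mst_topology_mgr_set_mst(mgr, mst);
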
 /**
  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
@@ -2173,6 +3710,8 @@
 	mgr->mst_state = mst_state;
 	/* set the device into MST mode */
 	if (mst_state) {
+		struct drm_dp_payload reset_pay;
+
 		WARN_ON(mgr->mst_primary);
 
 		/* get dpcd info */
@@ -2182,9 +3721,9 @@
 			goto out_unlock;
 		}
 
-		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
-					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
-					      &mgr->pbn_div)) {
+		mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
+							mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+		if (mgr->pbn_div == 0) {
 			ret = -EINVAL;
 			goto out_unlock;
 		}
@@ -2199,20 +3738,18 @@
 
 		/* give this the main reference */
 		mgr->mst_primary = mstb;
-		kref_get(&mgr->mst_primary->kref);
+		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
 
 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
-		if (ret < 0) {
+					 DP_MST_EN |
+					 DP_UP_REQ_EN |
+					 DP_UPSTREAM_IS_SRC);
+		if (ret < 0)
 			goto out_unlock;
-		}
 
-		{
-			struct drm_dp_payload reset_pay;
-			reset_pay.start_slot = 0;
-			reset_pay.num_slots = 0x3f;
-			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
-		}
+		reset_pay.start_slot = 0;
+		reset_pay.num_slots = 0x3f;
+		drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
 
 		queue_work(system_long_wq, &mgr->work);
 
@@ -2231,17 +3768,34 @@
 		mgr->payload_mask = 0;
 		set_bit(0, &mgr->payload_mask);
 		mgr->vcpi_mask = 0;
+		mgr->payload_id_table_cleared = false;
+
+		memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
+		memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
 	}
 
 out_unlock:
 	mutex_unlock(&mgr->lock);
 	mutex_unlock(&mgr->payload_lock);
 	if (mstb)
-		drm_dp_put_mst_branch_device(mstb);
+		drm_dp_mst_topology_put_mstb(mstb);
 	return ret;
 
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
+
+static void
+drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
+{
+	struct drm_dp_mst_port *port;
+
+	/* The link address will need to be re-sent on resume */
+	mstb->link_address_sent = false;
+
+	list_for_each_entry(port, &mstb->ports, next)
+		if (port->mstb)
+			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
+}
 
 /**
  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
@@ -2256,91 +3810,154 @@
 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
 	mutex_unlock(&mgr->lock);
+	flush_work(&mgr->up_req_work);
 	flush_work(&mgr->work);
-	flush_work(&mgr->destroy_connector_work);
+	flush_work(&mgr->delayed_destroy_work);
+
+	mutex_lock(&mgr->lock);
+	if (mgr->mst_state && mgr->mst_primary)
+		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
+	mutex_unlock(&mgr->lock);
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
 
 /**
  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
  * @mgr: manager to resume
+ * @sync: whether or not to perform topology reprobing synchronously
  *
  * This will fetch DPCD and see if the device is still there,
  * if it is, it will rewrite the MSTM control bits, and return.
  *
- * if the device fails this returns -1, and the driver should do
+ * If the device fails this returns -1, and the driver should do
  * a full MST reprobe, in case we were undocked.
+ *
+ * During system resume (where it is assumed that the driver will be calling
+ * drm_atomic_helper_resume()) this function should be called beforehand with
+ * @sync set to true. In contexts like runtime resume where the driver is not
+ * expected to be calling drm_atomic_helper_resume(), this function should be
+ * called with @sync set to false in order to avoid deadlocking.
+ *
+ * Returns: -1 if the MST topology was removed while we were suspended, 0
+ * otherwise.
  */
-int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+				   bool sync)
 {
-	int ret = 0;
+	int ret;
+	u8 guid[16];
 
 	mutex_lock(&mgr->lock);
+	if (!mgr->mst_primary)
+		goto out_fail;
 
-	if (mgr->mst_primary) {
-		int sret;
-		u8 guid[16];
+	ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
+			       DP_RECEIVER_CAP_SIZE);
+	if (ret != DP_RECEIVER_CAP_SIZE) {
+		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
 
-		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
-		if (sret != DP_RECEIVER_CAP_SIZE) {
-			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
-			ret = -1;
-			goto out_unlock;
-		}
+	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+				 DP_MST_EN |
+				 DP_UP_REQ_EN |
+				 DP_UPSTREAM_IS_SRC);
+	if (ret < 0) {
+		DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+		goto out_fail;
+	}
 
-		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
-		if (ret < 0) {
-			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
-			ret = -1;
-			goto out_unlock;
-		}
+	/* Some hubs forget their guids after they resume */
+	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+	if (ret != 16) {
+		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
 
-		/* Some hubs forget their guids after they resume */
-		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
-		if (sret != 16) {
-			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
-			ret = -1;
-			goto out_unlock;
-		}
-		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+	ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+	if (ret) {
+		DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
+		goto out_fail;
+	}
 
-		ret = 0;
-	} else
-		ret = -1;
-
-out_unlock:
+	/*
+	 * For the final step of resuming the topology, we need to bring the
+	 * state of our in-memory topology back into sync with reality. So,
+	 * restart the probing process as if we're probing a new hub
+	 */
+	queue_work(system_long_wq, &mgr->work);
 	mutex_unlock(&mgr->lock);
-	return ret;
+
+	if (sync) {
+		DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
+		flush_work(&mgr->work);
+	}
+
+	return 0;
+
+out_fail:
+	mutex_unlock(&mgr->lock);
+	return -1;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
 
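A short sketch of the suspend/resume pairing described above, as a driver's PM hooks might use it (the surrounding suspend/resume functions are illustrative):

	/* in the driver's system suspend path */
	drm_dp_mst_topology_mgr_suspend(mgr);

	/* in the driver's system resume path, before drm_atomic_helper_resume() */
	if (drm_dp_mst_topology_mgr_resume(mgr, true))
		/* the hub went away while suspended; tear down and reprobe */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
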
-static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool
+drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
+		      struct drm_dp_mst_branch **mstb)
 {
 	int len;
 	u8 replyblock[32];
-	int replylen, origlen, curreply;
+	int replylen, curreply;
 	int ret;
-	struct drm_dp_sideband_msg_rx *msg;
-	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
-	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+	u8 hdrlen;
+	struct drm_dp_sideband_msg_hdr hdr;
+	struct drm_dp_sideband_msg_rx *msg =
+		up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
+			   DP_SIDEBAND_MSG_DOWN_REP_BASE;
+
+	if (!up)
+		*mstb = NULL;
 
 	len = min(mgr->max_dpcd_transaction_bytes, 16);
-	ret = drm_dp_dpcd_read(mgr->aux, basereg,
-			       replyblock, len);
+	ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
 	if (ret != len) {
 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
 		return false;
 	}
-	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+
+	ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
+	if (ret == false) {
+		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
+			       1, replyblock, len, false);
+		DRM_DEBUG_KMS("ERROR: failed header\n");
+		return false;
+	}
+
+	if (!up) {
+		/* Caller is responsible for giving back this reference */
+		*mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
+		if (!*mstb) {
+			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+				      hdr.lct);
			return false;
+		}
+	}
+
+	if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
+		DRM_DEBUG_KMS("sideband msg set header failed %d\n",
+			      replyblock[0]);
+		return false;
+	}
+
+	replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
+	ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
 	if (!ret) {
 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
 		return false;
 	}
-	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
 
-	origlen = replylen;
-	replylen -= len;
+	replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
 	curreply = len;
 	while (replylen > 0) {
 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
@@ -2352,7 +3969,7 @@
 			return false;
 		}
 
-		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+		ret = drm_dp_sideband_append_payload(msg, replyblock, len);
 		if (!ret) {
 			DRM_DEBUG_KMS("failed to build sideband msg\n");
 			return false;
@@ -2366,131 +3983,194 @@
 
 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 {
-	int ret = 0;
+	struct drm_dp_sideband_msg_tx *txmsg;
+	struct drm_dp_mst_branch *mstb = NULL;
+	struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
 
-	if (!drm_dp_get_one_sb_msg(mgr, false)) {
-		memset(&mgr->down_rep_recv, 0,
-		       sizeof(struct drm_dp_sideband_msg_rx));
-		return 0;
+	if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
+		goto out_clear_reply;
+
+	/* Multi-packet message transmission, don't clear the reply */
+	if (!msg->have_eomt)
+		goto out;
+
+	/* find the message */
+	mutex_lock(&mgr->qlock);
+	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
+					 struct drm_dp_sideband_msg_tx, next);
+	mutex_unlock(&mgr->qlock);
+
+	/* Were we actually expecting a response, and from this mstb? */
+	if (!txmsg || txmsg->dst != mstb) {
+		struct drm_dp_sideband_msg_hdr *hdr;
+
+		hdr = &msg->initial_hdr;
+		DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
+			      mstb, hdr->seqno, hdr->lct, hdr->rad[0],
+			      msg->msg[0]);
+		goto out_clear_reply;
 	}
 
-	if (mgr->down_rep_recv.have_eomt) {
-		struct drm_dp_sideband_msg_tx *txmsg;
-		struct drm_dp_mst_branch *mstb;
-		int slot = -1;
-		mstb = drm_dp_get_mst_branch_device(mgr,
-						    mgr->down_rep_recv.initial_hdr.lct,
-						    mgr->down_rep_recv.initial_hdr.rad);
+	drm_dp_sideband_parse_reply(msg, &txmsg->reply);
 
-		if (!mstb) {
-			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
-			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-			return 0;
-		}
-
-		/* find the message */
-		slot = mgr->down_rep_recv.initial_hdr.seqno;
-		mutex_lock(&mgr->qlock);
-		txmsg = mstb->tx_slots[slot];
-		/* remove from slots */
-		mutex_unlock(&mgr->qlock);
-
-		if (!txmsg) {
-			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
-				      mstb,
-				      mgr->down_rep_recv.initial_hdr.seqno,
-				      mgr->down_rep_recv.initial_hdr.lct,
-				      mgr->down_rep_recv.initial_hdr.rad[0],
-				      mgr->down_rep_recv.msg[0]);
-			drm_dp_put_mst_branch_device(mstb);
-			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-			return 0;
-		}
-
-		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
-		if (txmsg->reply.reply_type == 1) {
-			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
-		}
-
-		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-		drm_dp_put_mst_branch_device(mstb);
-
-		mutex_lock(&mgr->qlock);
-		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
-		mstb->tx_slots[slot] = NULL;
-		mutex_unlock(&mgr->qlock);
-
-		wake_up_all(&mgr->tx_waitq);
+	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+		DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
+			      txmsg->reply.req_type,
+			      drm_dp_mst_req_type_str(txmsg->reply.req_type),
+			      txmsg->reply.u.nak.reason,
+			      drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
+			      txmsg->reply.u.nak.nak_data);
 	}
-	return ret;
+
+	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+	drm_dp_mst_topology_put_mstb(mstb);
+
+	mutex_lock(&mgr->qlock);
+	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+	list_del(&txmsg->next);
+	mutex_unlock(&mgr->qlock);
+
+	wake_up_all(&mgr->tx_waitq);
+
+	return 0;
+
+out_clear_reply:
+	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out:
+	if (mstb)
+		drm_dp_mst_topology_put_mstb(mstb);
+
+	return 0;
+}
+
+static inline bool
+drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
+			  struct drm_dp_pending_up_req *up_req)
+{
+	struct drm_dp_mst_branch *mstb = NULL;
+	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
+	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
+	bool hotplug = false;
+
+	if (hdr->broadcast) {
+		const u8 *guid = NULL;
+
+		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
+			guid = msg->u.conn_stat.guid;
+		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
+			guid = msg->u.resource_stat.guid;
+
+		if (guid)
+			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+	} else {
+		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
+	}
+
+	if (!mstb) {
+		DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+			      hdr->lct);
+		return false;
+	}
+
+	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
+	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
+		drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
+		hotplug = true;
+	}
+
+	drm_dp_mst_topology_put_mstb(mstb);
+	return hotplug;
+}
+
+static void drm_dp_mst_up_req_work(struct work_struct *work)
+{
+	struct drm_dp_mst_topology_mgr *mgr =
+		container_of(work, struct drm_dp_mst_topology_mgr,
+			     up_req_work);
+	struct drm_dp_pending_up_req *up_req;
+	bool send_hotplug = false;
+
+	mutex_lock(&mgr->probe_lock);
+	while (true) {
+		mutex_lock(&mgr->up_req_lock);
+		up_req = list_first_entry_or_null(&mgr->up_req_list,
+						  struct drm_dp_pending_up_req,
+						  next);
+		if (up_req)
+			list_del(&up_req->next);
+		mutex_unlock(&mgr->up_req_lock);
+
+		if (!up_req)
+			break;
+
+		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
+		kfree(up_req);
+	}
+	mutex_unlock(&mgr->probe_lock);
+
+	if (send_hotplug)
+		drm_kms_helper_hotplug_event(mgr->dev);
 }
 
 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 {
-	int ret = 0;
+	struct drm_dp_pending_up_req *up_req;
 
-	if (!drm_dp_get_one_sb_msg(mgr, true)) {
-		memset(&mgr->up_req_recv, 0,
-		       sizeof(struct drm_dp_sideband_msg_rx));
+	if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
+		goto out;
+
+	if (!mgr->up_req_recv.have_eomt)
 		return 0;
+
+	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
+	if (!up_req) {
+		DRM_ERROR("Not enough memory to process MST up req\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&up_req->next);
+
+	drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
+
+	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
+	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
+		DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
+			      up_req->msg.req_type);
+		kfree(up_req);
+		goto out;
 	}
 
-	if (mgr->up_req_recv.have_eomt) {
-		struct drm_dp_sideband_msg_req_body msg;
-		struct drm_dp_mst_branch *mstb = NULL;
-		bool seqno;
+	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
+				 false);
 
-		if (!mgr->up_req_recv.initial_hdr.broadcast) {
-			mstb = drm_dp_get_mst_branch_device(mgr,
-							    mgr->up_req_recv.initial_hdr.lct,
-							    mgr->up_req_recv.initial_hdr.rad);
-			if (!mstb) {
-				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
-				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-				return 0;
-			}
-		}
+	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+		const struct drm_dp_connection_status_notify *conn_stat =
+			&up_req->msg.u.conn_stat;
 
-		seqno = mgr->up_req_recv.initial_hdr.seqno;
-		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+		DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+			      conn_stat->port_number,
+			      conn_stat->legacy_device_plug_status,
+			      conn_stat->displayport_device_plug_status,
+			      conn_stat->message_capability_status,
+			      conn_stat->input_port,
+			      conn_stat->peer_device_type);
+	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+		const struct drm_dp_resource_status_notify *res_stat =
+			&up_req->msg.u.resource_stat;
 
-		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
-			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
-
-			if (!mstb)
-				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
-
-			if (!mstb) {
-				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
-				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-				return 0;
-			}
-
-			drm_dp_update_port(mstb, &msg.u.conn_stat);
-
-			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
-			(*mgr->cbs->hotplug)(mgr);
-
-		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
-			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
-			if (!mstb)
-				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
-
-			if (!mstb) {
-				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
-				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-				return 0;
-			}
-
-			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
-		}
-
-		if (mstb)
-			drm_dp_put_mst_branch_device(mstb);
-
-		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+		DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
+			      res_stat->port_number,
+			      res_stat->available_pbn);
 	}
-	return ret;
+
+	up_req->hdr = mgr->up_req_recv.initial_hdr;
+	mutex_lock(&mgr->up_req_lock);
+	list_add_tail(&up_req->next, &mgr->up_req_list);
+	mutex_unlock(&mgr->up_req_lock);
+	queue_work(system_long_wq, &mgr->up_req_work);
+
+out:
+	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+	return 0;
 }
 
 /**
@@ -2534,32 +4214,44 @@
 /**
  * drm_dp_mst_detect_port() - get connection status for an MST port
  * @connector: DRM connector for this port
+ * @ctx: The acquisition context to use for grabbing locks
  * @mgr: manager for this port
- * @port: unverified pointer to a port
+ * @port: pointer to a port
  *
- * This returns the current connection state for a port. It validates the
- * port pointer still exists so the caller doesn't require a reference
+ * This returns the current connection state for a port.
  */
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
-						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+		       struct drm_modeset_acquire_ctx *ctx,
+		       struct drm_dp_mst_topology_mgr *mgr,
+		       struct drm_dp_mst_port *port)
 {
-	enum drm_connector_status status = connector_status_disconnected;
+	int ret;
 
-	/* we need to search for the port in the mgr in case its gone */
-	port = drm_dp_get_validated_port_ref(mgr, port);
+	/* we need to search for the port in the mgr in case it's gone */
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
 		return connector_status_disconnected;
+
+	ret = drm_modeset_lock(&mgr->base.lock, ctx);
+	if (ret)
+		goto out;
+
+	ret = connector_status_disconnected;
 
 	if (!port->ddps)
 		goto out;
 
 	switch (port->pdt) {
 	case DP_PEER_DEVICE_NONE:
+		break;
 	case DP_PEER_DEVICE_MST_BRANCHING:
+		if (!port->mcs)
+			ret = connector_status_connected;
 		break;
 
 	case DP_PEER_DEVICE_SST_SINK:
-		status = connector_status_connected;
+		ret = connector_status_connected;
 		/* for logical ports - cache the EDID */
 		if (port->port_num >= 8 && !port->cached_edid) {
 			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
@@ -2567,49 +4259,14 @@
 		break;
 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
 		if (port->ldps)
-			status = connector_status_connected;
+			ret = connector_status_connected;
 		break;
 	}
 out:
-	drm_dp_put_port(port);
-	return status;
+	drm_dp_mst_topology_put_port(port);
+	return ret;
 }
 EXPORT_SYMBOL(drm_dp_mst_detect_port);
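Since this helper now takes an acquisition context and can return -EDEADLK from drm_modeset_lock(), it is meant to back a connector's &drm_connector_helper_funcs.detect_ctx hook rather than the legacy .detect one. A minimal sketch, with the connector wrapper type and field names being hypothetical:

static int example_mst_connector_detect_ctx(struct drm_connector *connector,
					    struct drm_modeset_acquire_ctx *ctx,
					    bool force)
{
	struct example_connector *conn = to_example_connector(connector);

	return drm_dp_mst_detect_port(connector, ctx, conn->mst_mgr,
				      conn->mst_port);
}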
-
-/**
- * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
- * @mgr: manager for this port
- * @port: unverified pointer to a port.
- *
- * This returns whether the port supports audio or not.
- */
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
-			       struct drm_dp_mst_port *port)
-{
-	bool ret = false;
-
-	port = drm_dp_get_validated_port_ref(mgr, port);
-	if (!port)
-		return ret;
-	ret = port->has_audio;
-	drm_dp_put_port(port);
-	return ret;
-}
-EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
-
-bool drm_dp_mst_has_fec(struct drm_dp_mst_topology_mgr *mgr,
-			struct drm_dp_mst_port *port)
-{
-	bool ret = false;
-
-	port = drm_dp_get_validated_port_ref(mgr, port);
-	if (!port)
-		return ret;
-	ret = port->fec_capable;
-	drm_dp_put_port(port);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(drm_dp_mst_has_fec);
 
 /**
  * drm_dp_mst_get_edid() - get EDID for an MST port
@@ -2625,8 +4282,8 @@
 {
 	struct edid *edid = NULL;
 
-	/* we need to search for the port in the mgr in case its gone */
-	port = drm_dp_get_validated_port_ref(mgr, port);
+	/* we need to search for the port in the mgr in case it's gone */
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
 		return NULL;
 
@@ -2634,18 +4291,24 @@
 		edid = drm_edid_duplicate(port->cached_edid);
 	else {
 		edid = drm_get_edid(connector, &port->aux.ddc);
-		drm_connector_set_tile_property(connector);
 	}
 	port->has_audio = drm_detect_monitor_audio(edid);
-	drm_dp_put_port(port);
+	drm_dp_mst_topology_put_port(port);
 	return edid;
 }
 EXPORT_SYMBOL(drm_dp_mst_get_edid);
 
 /**
- * drm_dp_find_vcpi_slots() - find slots for this PBN value
+ * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
  * @mgr: manager to use
  * @pbn: payload bandwidth to convert into slots.
+ *
+ * Calculate the number of VCPI slots that will be required for the given PBN
+ * value. This function is deprecated, and should not be used in atomic
+ * drivers.
+ *
+ * RETURNS:
+ * The total slots required for this port, or error.
 */
 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
 			   int pbn)
@@ -2681,42 +4344,100 @@
 }
 
 /**
- * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
+ * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
  * @state: global atomic state
  * @mgr: MST topology manager for the port
  * @port: port to find vcpi slots for
  * @pbn: bandwidth required for the mode in PBN
+ * @pbn_div: divider for DSC mode that takes FEC into account
  *
- * RETURNS:
- * Total slots in the atomic state assigned for this port or error
+ * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
+ * may have had. Any atomic drivers which support MST must call this function
+ * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
+ * current VCPI allocation for the new state, but only when
+ * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
+ * to ensure compatibility with userspace applications that still use the
+ * legacy modesetting UAPI.
+ *
+ * Allocations set by this function are not checked against the bandwidth
+ * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
+ *
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK however, to call this function and
+ * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
+ *
+ * See also:
+ * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * Total slots in the atomic state assigned for this port, or a negative error
+ * code if the port no longer exists
 */
 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
 				  struct drm_dp_mst_topology_mgr *mgr,
-				  struct drm_dp_mst_port *port, int pbn)
+				  struct drm_dp_mst_port *port, int pbn,
+				  int pbn_div)
 {
 	struct drm_dp_mst_topology_state *topology_state;
-	int req_slots;
+	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
+	int prev_slots, prev_bw, req_slots;
 
 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
 	if (IS_ERR(topology_state))
 		return PTR_ERR(topology_state);
 
-	port = drm_dp_get_validated_port_ref(mgr, port);
-	if (port == NULL)
-		return -EINVAL;
-	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
-	DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
-		      req_slots, topology_state->avail_slots);
+	/* Find the current allocation for this port, if any */
+	list_for_each_entry(pos, &topology_state->vcpis, next) {
+		if (pos->port == port) {
+			vcpi = pos;
+			prev_slots = vcpi->vcpi;
+			prev_bw = vcpi->pbn;
 
-	if (req_slots > topology_state->avail_slots) {
-		drm_dp_put_port(port);
-		return -ENOSPC;
+			/*
+			 * This should never happen, unless the driver tries
+			 * releasing and allocating the same VCPI allocation,
+			 * which is an error
+			 */
+			if (WARN_ON(!prev_slots)) {
+				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
+					  port);
+				return -EINVAL;
+			}
+
+			break;
+		}
+	}
+	if (!vcpi) {
+		prev_slots = 0;
+		prev_bw = 0;
 	}
 
-	topology_state->avail_slots -= req_slots;
-	DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots);
+	if (pbn_div <= 0)
+		pbn_div = mgr->pbn_div;
 
-	drm_dp_put_port(port);
+	req_slots = DIV_ROUND_UP(pbn, pbn_div);
+
+	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
+			 port->connector->base.id, port->connector->name,
+			 port, prev_slots, req_slots);
+	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
+			 port->connector->base.id, port->connector->name,
+			 port, prev_bw, pbn);
+
+	/* Add the new allocation to the state */
+	if (!vcpi) {
+		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
+		if (!vcpi)
+			return -ENOMEM;
+
+		drm_dp_mst_get_port_malloc(port);
+		vcpi->port = port;
+		list_add(&vcpi->next, &topology_state->vcpis);
+	}
+	vcpi->vcpi = req_slots;
+	vcpi->pbn = pbn;
+
 	return req_slots;
 }
 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
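A condensed sketch of the atomic_check flow the comment above describes; the crtc_state, adjusted_mode, bpp, mgr and port variables are placeholders for whatever the surrounding encoder callback has at hand, and passing 0 for @pbn_div falls back to the manager's own pbn_div:

	/* inside a hypothetical &drm_encoder_helper_funcs.atomic_check */
	if (crtc_state->mode_changed || crtc_state->connectors_changed) {
		int pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
					       bpp, false);
		int slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port,
							  pbn, 0);

		if (slots < 0)
			return slots;
	}
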
@@ -2725,31 +4446,58 @@
  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
  * @state: global atomic state
  * @mgr: MST topology manager for the port
- * @slots: number of vcpi slots to release
+ * @port: The port to release the VCPI slots from
  *
- * RETURNS:
- * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
- * negative error code
+ * Releases any VCPI slots that have been allocated to a port in the atomic
+ * state. Any atomic drivers which support MST must call this function in
+ * their &drm_connector_helper_funcs.atomic_check() callback when the
+ * connector will no longer have VCPI allocated (e.g. because its CRTC was
+ * removed) when it had VCPI allocated in the previous atomic state.
+ *
+ * It is OK to call this even if @port has been removed from the system.
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK however, to call this function and
+ * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
+ * phase.
+ *
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * 0 if all slots for this port were added back to
+ * &drm_dp_mst_topology_state.avail_slots or negative error code
 */
 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
 				     struct drm_dp_mst_topology_mgr *mgr,
-				     int slots)
+				     struct drm_dp_mst_port *port)
 {
 	struct drm_dp_mst_topology_state *topology_state;
+	struct drm_dp_vcpi_allocation *pos;
+	bool found = false;
 
 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
 	if (IS_ERR(topology_state))
 		return PTR_ERR(topology_state);
 
-	/* We cannot rely on port->vcpi.num_slots to update
-	 * topology_state->avail_slots as the port may not exist if the parent
-	 * branch device was unplugged. This should be fixed by tracking
-	 * per-port slot allocation in drm_dp_mst_topology_state instead of
-	 * depending on the caller to tell us how many slots to release.
-	 */
-	topology_state->avail_slots += slots;
-	DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
-		      slots, topology_state->avail_slots);
+	list_for_each_entry(pos, &topology_state->vcpis, next) {
+		if (pos->port == port) {
+			found = true;
+			break;
+		}
+	}
+	if (WARN_ON(!found)) {
+		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
+			  port, &topology_state->base);
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
+	if (pos->vcpi) {
+		drm_dp_mst_put_port_malloc(port);
+		pos->vcpi = 0;
+		pos->pbn = 0;
+	}
 
 	return 0;
 }
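The matching release side, sketched for a hypothetical &drm_connector_helper_funcs.atomic_check that notices the connector losing its CRTC (old_conn_state/new_conn_state are the usual connector states from the atomic state):

	if (old_conn_state->crtc && !new_conn_state->crtc)
		return drm_dp_atomic_release_vcpi_slots(state, mgr, port);
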
@@ -2770,14 +4518,15 @@
 	if (slots < 0)
 		return false;
 
-	port = drm_dp_get_validated_port_ref(mgr, port);
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
 		return false;
 
 	if (port->vcpi.vcpi > 0) {
-		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
+			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
 		if (pbn == port->vcpi.pbn) {
-			drm_dp_put_port(port);
+			drm_dp_mst_topology_put_port(port);
 			return true;
 		}
 	}
@@ -2785,14 +4534,16 @@
 	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
 	if (ret) {
 		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
-			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
-		drm_dp_put_port(port);
+			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+		drm_dp_mst_topology_put_port(port);
 		goto out;
 	}
 	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
-		      pbn, port->vcpi.num_slots);
+		      pbn, port->vcpi.num_slots);
 
-	drm_dp_put_port(port);
+	/* Keep port allocated until its payload has been removed */
+	drm_dp_mst_get_port_malloc(port);
+	drm_dp_mst_topology_put_port(port);
 	return true;
 out:
 	return false;
@@ -2802,12 +4553,13 @@
 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
 {
 	int slots = 0;
-	port = drm_dp_get_validated_port_ref(mgr, port);
+
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
 		return slots;
 
 	slots = port->vcpi.num_slots;
-	drm_dp_put_port(port);
+	drm_dp_mst_topology_put_port(port);
 	return slots;
 }
 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
@@ -2821,23 +4573,36 @@
 */
 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
 {
-	port = drm_dp_get_validated_port_ref(mgr, port);
-	if (!port)
-		return;
+	/*
+	 * A port with VCPI will remain allocated until its VCPI is
+	 * released, no verified ref needed
+	 */
+
 	port->vcpi.num_slots = 0;
-	drm_dp_put_port(port);
 }
 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
 
 /**
  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
  * @mgr: manager for this port
- * @port: unverified port to deallocate vcpi for
+ * @port: port to deallocate vcpi for
+ *
+ * This can be called unconditionally, regardless of whether
+ * drm_dp_mst_allocate_vcpi() succeeded or not.
 */
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port)
 {
-	port = drm_dp_get_validated_port_ref(mgr, port);
-	if (!port)
+	bool skip;
+
+	if (!port->vcpi.vcpi)
+		return;
+
+	mutex_lock(&mgr->lock);
+	skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
+	mutex_unlock(&mgr->lock);
+
+	if (skip)
 		return;
 
 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
@@ -2845,7 +4610,7 @@
 	port->vcpi.pbn = 0;
 	port->vcpi.aligned_pbn = 0;
 	port->vcpi.vcpi = 0;
-	drm_dp_put_port(port);
+	drm_dp_mst_put_port_malloc(port);
 }
 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
 
@@ -2904,10 +4669,15 @@
 }
 
 /**
- * drm_dp_check_act_status() - Check ACT handled status.
+ * drm_dp_check_act_status() - Polls for ACT handled status.
  * @mgr: manager to use
  *
- * Check the payload status bits in the DPCD for ACT handled completion.
+ * Tries waiting for the MST hub to finish updating its payload table by
+ * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
+ * take that long).
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
 */
 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
 {
@@ -2924,10 +4694,14 @@
 				 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
 				 200, timeout_ms * USEC_PER_MSEC);
 	if (ret < 0 && status >= 0) {
-		DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n",
-			      timeout_ms, status);
+		DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
+			  timeout_ms, status);
 		return -EINVAL;
 	} else if (status < 0) {
+		/*
+		 * Failure here isn't unexpected - the hub may have
+		 * just been unplugged
+		 */
 		DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
 			      status);
 		return status;
@@ -2941,18 +4715,12 @@
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
+ * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
-int drm_dp_calc_pbn_mode(int clock, int bpp)
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
 {
-	u64 kbps;
-	s64 peak_kbps;
-	u32 numerator;
-	u32 denominator;
-
-	kbps = clock * bpp;
-
 	/*
 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
 	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
@@ -2962,41 +4730,20 @@
 	 * peak_kbps *= (1006/1000)
 	 * peak_kbps *= (64/54)
 	 * peak_kbps *= 8    convert to bytes
+	 *
+	 * If the bpp is in units of 1/16, further divide by 16. Put this
+	 * factor in the numerator rather than the denominator to avoid
+	 * integer overflow
 	 */
 
-	numerator = 64 * 1006;
-	denominator = 54 * 8 * 1000 * 1000;
+	if (dsc)
+		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
+					8 * 54 * 1000 * 1000);
 
-	kbps *= numerator;
-	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
-
-	return drm_fixp2int_ceil(peak_kbps);
+	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
				8 * 54 * 1000 * 1000);
 }
 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
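As a sanity reference, the self-test removed just below exercised this formula with these exact inputs (clock in kHz, bpp in whole bits), shown here against the new signature with @dsc set to false:

	drm_dp_calc_pbn_mode(154000, 30, false);	/* returns 689 */
	drm_dp_calc_pbn_mode(234000, 30, false);	/* returns 1047 */
	drm_dp_calc_pbn_mode(297000, 24, false);	/* returns 1063 */
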
2976
-
2977
-static int test_calc_pbn_mode(void)
2978
-{
2979
- int ret;
2980
- ret = drm_dp_calc_pbn_mode(154000, 30);
2981
- if (ret != 689) {
2982
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2983
- 154000, 30, 689, ret);
2984
- return -EINVAL;
2985
- }
2986
- ret = drm_dp_calc_pbn_mode(234000, 30);
2987
- if (ret != 1047) {
2988
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2989
- 234000, 30, 1047, ret);
2990
- return -EINVAL;
2991
- }
2992
- ret = drm_dp_calc_pbn_mode(297000, 24);
2993
- if (ret != 1063) {
2994
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2995
- 297000, 24, 1063, ret);
2996
- return -EINVAL;
2997
- }
2998
- return 0;
2999
-}
30004747
30014748 /* we want to kick the TX after we've ack the up/down IRQs. */
30024749 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
....@@ -3048,6 +4795,7 @@
30484795
30494796 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
30504797 drm_edid_get_monitor_name(mst_edid, name, namelen);
4798
+ kfree(mst_edid);
30514799 }
30524800
30534801 /**
....@@ -3104,15 +4852,34 @@
31044852 int ret;
31054853
31064854 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
4855
+ if (ret) {
4856
+ seq_printf(m, "dpcd read failed\n");
4857
+ goto out;
4858
+ }
31074859 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4860
+
31084861 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4862
+ if (ret != 2) {
4863
+ seq_printf(m, "faux/mst read failed\n");
4864
+ goto out;
4865
+ }
31094866 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4867
+
31104868 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4869
+ if (ret != 1) {
4870
+ seq_printf(m, "mst ctrl read failed\n");
4871
+ goto out;
4872
+ }
31114873 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
31124874
31134875 /* dump the standard OUI branch header */
31144876 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4877
+ if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4878
+ seq_printf(m, "branch oui read failed\n");
4879
+ goto out;
4880
+ }
31154881 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4882
+
31164883 for (i = 0x3; i < 0x8 && buf[i]; i++)
31174884 seq_printf(m, "%c", buf[i]);
31184885 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
....@@ -3121,6 +4888,7 @@
31214888 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
31224889 }
31234890
4891
+out:
31244892 mutex_unlock(&mgr->lock);
31254893
31264894 }
....@@ -3136,66 +4904,147 @@
31364904 mutex_unlock(&mgr->qlock);
31374905 }
31384906
3139
-static void drm_dp_free_mst_port(struct kref *kref)
4907
+static inline void
4908
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
31404909 {
3141
- struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
3142
- kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
3143
- kfree(port);
4910
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4911
+
4912
+ if (port->connector) {
4913
+ drm_connector_unregister(port->connector);
4914
+ drm_connector_put(port->connector);
4915
+ }
4916
+
4917
+ drm_dp_mst_put_port_malloc(port);
31444918 }
31454919
3146
-static void drm_dp_destroy_connector_work(struct work_struct *work)
4920
+static inline void
4921
+drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
31474922 {
3148
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
3149
- struct drm_dp_mst_port *port;
3150
- bool send_hotplug = false;
4923
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4924
+ struct drm_dp_mst_port *port, *port_tmp;
4925
+ struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
4926
+ bool wake_tx = false;
4927
+
4928
+ mutex_lock(&mgr->lock);
4929
+ list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
4930
+ list_del(&port->next);
4931
+ drm_dp_mst_topology_put_port(port);
4932
+ }
4933
+ mutex_unlock(&mgr->lock);
4934
+
4935
+ /* drop any tx slot msg */
4936
+ mutex_lock(&mstb->mgr->qlock);
4937
+ list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
4938
+ if (txmsg->dst != mstb)
4939
+ continue;
4940
+
4941
+ txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4942
+ list_del(&txmsg->next);
4943
+ wake_tx = true;
4944
+ }
4945
+ mutex_unlock(&mstb->mgr->qlock);
4946
+
4947
+ if (wake_tx)
4948
+ wake_up_all(&mstb->mgr->tx_waitq);
4949
+
4950
+ drm_dp_mst_put_mstb_malloc(mstb);
4951
+}
4952
+
4953
+static void drm_dp_delayed_destroy_work(struct work_struct *work)
4954
+{
4955
+ struct drm_dp_mst_topology_mgr *mgr =
4956
+ container_of(work, struct drm_dp_mst_topology_mgr,
4957
+ delayed_destroy_work);
4958
+ bool send_hotplug = false, go_again;
4959
+
31514960 /*
31524961 * Not a regular list traverse as we have to drop the destroy
3153
- * connector lock before destroying the connector, to avoid AB->BA
4962
+ * connector lock before destroying the mstb/port, to avoid AB->BA
31544963 * ordering between this lock and the config mutex.
31554964 */
3156
- for (;;) {
3157
- mutex_lock(&mgr->destroy_connector_lock);
3158
- port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
3159
- if (!port) {
3160
- mutex_unlock(&mgr->destroy_connector_lock);
3161
- break;
3162
- }
3163
- list_del(&port->next);
3164
- mutex_unlock(&mgr->destroy_connector_lock);
4965
+ do {
4966
+ go_again = false;
31654967
3166
- kref_init(&port->kref);
3167
- INIT_LIST_HEAD(&port->next);
4968
+ for (;;) {
4969
+ struct drm_dp_mst_branch *mstb;
31684970
3169
- mgr->cbs->destroy_connector(mgr, port->connector);
4971
+ mutex_lock(&mgr->delayed_destroy_lock);
4972
+ mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4973
+ struct drm_dp_mst_branch,
4974
+ destroy_next);
4975
+ if (mstb)
4976
+ list_del(&mstb->destroy_next);
4977
+ mutex_unlock(&mgr->delayed_destroy_lock);
31704978
3171
- drm_dp_port_teardown_pdt(port, port->pdt);
3172
- port->pdt = DP_PEER_DEVICE_NONE;
4979
+ if (!mstb)
4980
+ break;
31734981
3174
- if (!port->input && port->vcpi.vcpi > 0) {
3175
- drm_dp_mst_reset_vcpi_slots(mgr, port);
3176
- drm_dp_update_payload_part1(mgr);
3177
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4982
+ drm_dp_delayed_destroy_mstb(mstb);
4983
+ go_again = true;
31784984 }
31794985
3180
- kref_put(&port->kref, drm_dp_free_mst_port);
3181
- send_hotplug = true;
3182
- }
4986
+ for (;;) {
4987
+ struct drm_dp_mst_port *port;
4988
+
4989
+ mutex_lock(&mgr->delayed_destroy_lock);
4990
+ port = list_first_entry_or_null(&mgr->destroy_port_list,
4991
+ struct drm_dp_mst_port,
4992
+ next);
4993
+ if (port)
4994
+ list_del(&port->next);
4995
+ mutex_unlock(&mgr->delayed_destroy_lock);
4996
+
4997
+ if (!port)
4998
+ break;
4999
+
5000
+ drm_dp_delayed_destroy_port(port);
5001
+ send_hotplug = true;
5002
+ go_again = true;
5003
+ }
5004
+ } while (go_again);
5005
+
31835006 if (send_hotplug)
3184
- (*mgr->cbs->hotplug)(mgr);
5007
+ drm_kms_helper_hotplug_event(mgr->dev);
31855008 }
31865009
31875010 static struct drm_private_state *
31885011 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
31895012 {
3190
- struct drm_dp_mst_topology_state *state;
5013
+ struct drm_dp_mst_topology_state *state, *old_state =
5014
+ to_dp_mst_topology_state(obj->state);
5015
+ struct drm_dp_vcpi_allocation *pos, *vcpi;
31915016
3192
- state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
5017
+ state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
31935018 if (!state)
31945019 return NULL;
31955020
31965021 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
31975022
5023
+ INIT_LIST_HEAD(&state->vcpis);
5024
+
5025
+ list_for_each_entry(pos, &old_state->vcpis, next) {
5026
+ /* Prune leftover freed VCPI allocations */
5027
+ if (!pos->vcpi)
5028
+ continue;
5029
+
5030
+ vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
5031
+ if (!vcpi)
5032
+ goto fail;
5033
+
5034
+ drm_dp_mst_get_port_malloc(vcpi->port);
5035
+ list_add(&vcpi->next, &state->vcpis);
5036
+ }
5037
+
31985038 return &state->base;
5039
+
5040
+fail:
5041
+ list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
5042
+ drm_dp_mst_put_port_malloc(pos->port);
5043
+ kfree(pos);
5044
+ }
5045
+ kfree(state);
5046
+
5047
+ return NULL;
31995048 }
32005049
32015050 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
....@@ -3203,14 +5052,346 @@
32035052 {
32045053 struct drm_dp_mst_topology_state *mst_state =
32055054 to_dp_mst_topology_state(state);
5055
+ struct drm_dp_vcpi_allocation *pos, *tmp;
5056
+
5057
+ list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
5058
+ /* We only keep references to ports with non-zero VCPIs */
5059
+ if (pos->vcpi)
5060
+ drm_dp_mst_put_port_malloc(pos->port);
5061
+ kfree(pos);
5062
+ }
32065063
32075064 kfree(mst_state);
32085065 }
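
Taken together, the duplicate/destroy pair maintains one invariant: a VCPI allocation holds a malloc reference on its port exactly while its vcpi count is non-zero. A hypothetical helper makes the rule explicit; the real code open-codes the same transitions in the find/release VCPI paths and in the two functions above:

/* Hypothetical helper, shown only to illustrate the reference rule. */
static void vcpi_alloc_set(struct drm_dp_vcpi_allocation *pos, int vcpi)
{
        if (!pos->vcpi && vcpi)
                drm_dp_mst_get_port_malloc(pos->port);  /* 0 -> N: take ref */
        else if (pos->vcpi && !vcpi)
                drm_dp_mst_put_port_malloc(pos->port);  /* N -> 0: drop ref */

        pos->vcpi = vcpi;
}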
32095066
3210
-static const struct drm_private_state_funcs mst_state_funcs = {
5067
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
5068
+ struct drm_dp_mst_branch *branch)
5069
+{
5070
+ while (port->parent) {
5071
+ if (port->parent == branch)
5072
+ return true;
5073
+
5074
+ if (port->parent->port_parent)
5075
+ port = port->parent->port_parent;
5076
+ else
5077
+ break;
5078
+ }
5079
+ return false;
5080
+}
5081
+
5082
+static int
5083
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5084
+ struct drm_dp_mst_topology_state *state);
5085
+
5086
+static int
5087
+drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
5088
+ struct drm_dp_mst_topology_state *state)
5089
+{
5090
+ struct drm_dp_vcpi_allocation *vcpi;
5091
+ struct drm_dp_mst_port *port;
5092
+ int pbn_used = 0, ret;
5093
+ bool found = false;
5094
+
5095
+ /* Check that we have at least one port in our state that's downstream
5096
+ * of this branch; otherwise we can skip this branch
5097
+ */
5098
+ list_for_each_entry(vcpi, &state->vcpis, next) {
5099
+ if (!vcpi->pbn ||
5100
+ !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
5101
+ continue;
5102
+
5103
+ found = true;
5104
+ break;
5105
+ }
5106
+ if (!found)
5107
+ return 0;
5108
+
5109
+ if (mstb->port_parent)
5110
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
5111
+ mstb->port_parent->parent, mstb->port_parent,
5112
+ mstb);
5113
+ else
5114
+ DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
5115
+ mstb);
5116
+
5117
+ list_for_each_entry(port, &mstb->ports, next) {
5118
+ ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
5119
+ if (ret < 0)
5120
+ return ret;
5121
+
5122
+ pbn_used += ret;
5123
+ }
5124
+
5125
+ return pbn_used;
5126
+}
5127
+
5128
+static int
5129
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5130
+ struct drm_dp_mst_topology_state *state)
5131
+{
5132
+ struct drm_dp_vcpi_allocation *vcpi;
5133
+ int pbn_used = 0;
5134
+
5135
+ if (port->pdt == DP_PEER_DEVICE_NONE)
5136
+ return 0;
5137
+
5138
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
5139
+ bool found = false;
5140
+
5141
+ list_for_each_entry(vcpi, &state->vcpis, next) {
5142
+ if (vcpi->port != port)
5143
+ continue;
5144
+ if (!vcpi->pbn)
5145
+ return 0;
5146
+
5147
+ found = true;
5148
+ break;
5149
+ }
5150
+ if (!found)
5151
+ return 0;
5152
+
5153
+ /* This should never happen, as it means we tried to
5154
+ * set a mode before querying the full_pbn
5155
+ */
5156
+ if (WARN_ON(!port->full_pbn))
5157
+ return -EINVAL;
5158
+
5159
+ pbn_used = vcpi->pbn;
5160
+ } else {
5161
+ pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
5162
+ state);
5163
+ if (pbn_used <= 0)
5164
+ return pbn_used;
5165
+ }
5166
+
5167
+ if (pbn_used > port->full_pbn) {
5168
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
5169
+ port->parent, port, pbn_used,
5170
+ port->full_pbn);
5171
+ return -ENOSPC;
5172
+ }
5173
+
5174
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
5175
+ port->parent, port, pbn_used, port->full_pbn);
5176
+
5177
+ return pbn_used;
5178
+}
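
Since the two functions recurse into each other down the tree, a worked example on a hypothetical topology may help:

/*
 * Worked example (hypothetical topology):
 *
 *   primary mstb
 *    |- port A: end device, vcpi->pbn = 1000
 *    |- port B: -> branch B1
 *        |- port C: end device, vcpi->pbn = 800
 *
 * check_mstb(primary) = check_port(A) + check_port(B)
 *                     = 1000 + check_mstb(B1)
 *                     = 1000 + 800 = 1800
 *
 * Each level compares the accumulated total against that port's
 * full_pbn before passing it upward, so an overcommitted link
 * anywhere on the path fails with -ENOSPC.
 */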
5179
+
5180
+static inline int
5181
+drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
5182
+ struct drm_dp_mst_topology_state *mst_state)
5183
+{
5184
+ struct drm_dp_vcpi_allocation *vcpi;
5185
+ int avail_slots = 63, payload_count = 0;
5186
+
5187
+ list_for_each_entry(vcpi, &mst_state->vcpis, next) {
5188
+ /* Releasing VCPI is always OK, even if the port is gone */
5189
+ if (!vcpi->vcpi) {
5190
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
5191
+ vcpi->port);
5192
+ continue;
5193
+ }
5194
+
5195
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
5196
+ vcpi->port, vcpi->vcpi);
5197
+
5198
+ avail_slots -= vcpi->vcpi;
5199
+ if (avail_slots < 0) {
5200
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
5201
+ vcpi->port, mst_state,
5202
+ avail_slots + vcpi->vcpi);
5203
+ return -ENOSPC;
5204
+ }
5205
+
5206
+ if (++payload_count > mgr->max_payloads) {
5207
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
5208
+ mgr, mst_state, mgr->max_payloads);
5209
+ return -EINVAL;
5210
+ }
5211
+ }
5212
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
5213
+ mgr, mst_state, avail_slots,
5214
+ 63 - avail_slots);
5215
+
5216
+ return 0;
5217
+}
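
The slot arithmetic deserves a short worked example:

/*
 * Worked example: one of the 64 MTP timeslots carries the MTP header,
 * which is why avail_slots starts at 63. Three streams of 20 slots
 * each leave avail_slots = 3; a fourth stream needing 10 slots then
 * fails with -ENOSPC, and the debug message prints avail=3 because
 * the subtraction is undone for the log (avail_slots + vcpi->vcpi).
 */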
5218
+
5219
+/**
5220
+ * drm_dp_mst_add_affected_dsc_crtcs - Add DSC-affected CRTCs to the atomic state
5221
+ * @state: Pointer to the new struct drm_atomic_state
5222
+ * @mgr: MST topology manager
5223
+ *
5224
+ * Whenever there is a change in the MST topology,
5225
+ * the DSC configuration has to be recalculated,
5226
+ * so we need to trigger a modeset on all affected
5227
+ * CRTCs in that topology.
5228
+ *
5229
+ * See also:
5230
+ * drm_dp_mst_atomic_enable_dsc()
5231
+ */
5232
+int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5233
+{
5234
+ struct drm_dp_mst_topology_state *mst_state;
5235
+ struct drm_dp_vcpi_allocation *pos;
5236
+ struct drm_connector *connector;
5237
+ struct drm_connector_state *conn_state;
5238
+ struct drm_crtc *crtc;
5239
+ struct drm_crtc_state *crtc_state;
5240
+
5241
+ mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5242
+
5243
+ if (IS_ERR(mst_state))
5244
+ return PTR_ERR(mst_state);
5245
+
5246
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
5247
+
5248
+ connector = pos->port->connector;
5249
+
5250
+ if (!connector)
5251
+ return -EINVAL;
5252
+
5253
+ conn_state = drm_atomic_get_connector_state(state, connector);
5254
+
5255
+ if (IS_ERR(conn_state))
5256
+ return PTR_ERR(conn_state);
5257
+
5258
+ crtc = conn_state->crtc;
5259
+
5260
+ if (!crtc)
5261
+ continue;
5262
+
5263
+ if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5264
+ continue;
5265
+
5266
+ crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5267
+
5268
+ if (IS_ERR(crtc_state))
5269
+ return PTR_ERR(crtc_state);
5270
+
5271
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5272
+ mgr, crtc);
5273
+
5274
+ crtc_state->mode_changed = true;
5275
+ }
5276
+ return 0;
5277
+}
5278
+EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
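
A minimal usage sketch from a hypothetical driver's atomic check path; foo_device, to_foo_device() and the embedded mst_mgr are assumptions, not part of this API:

/* Hypothetical driver code: pull every CRTC on the MST topology into
 * the commit whenever something DSC-related may have changed. */
static int foo_atomic_check(struct drm_device *dev,
                            struct drm_atomic_state *state)
{
        struct foo_device *foo = to_foo_device(dev);
        int ret;

        ret = drm_dp_mst_add_affected_dsc_crtcs(state, &foo->mst_mgr);
        if (ret)
                return ret;

        return drm_atomic_helper_check(dev, state);
}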
5279
+
5280
+/**
5281
+ * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5282
+ * @state: Pointer to the new drm_atomic_state
5283
+ * @port: Pointer to the affected MST Port
5284
+ * @pbn: Newly recalculated bw required for link with DSC enabled
5285
+ * @pbn_div: Divider to calculate correct number of pbn per slot
5286
+ * @enable: Boolean flag to enable or disable DSC on the port
5287
+ *
5288
+ * This function enables DSC on the given port by
5289
+ * recalculating its VCPI from the PBN provided, and sets
5290
+ * the dsc_enabled flag to keep track of which ports
5291
+ * have DSC enabled.
5292
+ *
5293
+ */
5294
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5295
+ struct drm_dp_mst_port *port,
5296
+ int pbn, int pbn_div,
5297
+ bool enable)
5298
+{
5299
+ struct drm_dp_mst_topology_state *mst_state;
5300
+ struct drm_dp_vcpi_allocation *pos;
5301
+ bool found = false;
5302
+ int vcpi = 0;
5303
+
5304
+ mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5305
+
5306
+ if (IS_ERR(mst_state))
5307
+ return PTR_ERR(mst_state);
5308
+
5309
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
5310
+ if (pos->port == port) {
5311
+ found = true;
5312
+ break;
5313
+ }
5314
+ }
5315
+
5316
+ if (!found) {
5317
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5318
+ port, mst_state);
5319
+ return -EINVAL;
5320
+ }
5321
+
5322
+ if (pos->dsc_enabled == enable) {
5323
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5324
+ port, enable, pos->vcpi);
5325
+ vcpi = pos->vcpi;
5326
+ }
5327
+
5328
+ if (enable) {
5329
+ vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5330
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5331
+ port, vcpi);
5332
+ if (vcpi < 0)
5333
+ return -EINVAL;
5334
+ }
5335
+
5336
+ pos->dsc_enabled = enable;
5337
+
5338
+ return vcpi;
5339
+}
5340
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
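
On success the return value is the (re)allocated VCPI slot count, so a caller typically looks like this hedged fragment, where dsc_pbn is assumed to be a PBN value recomputed for the compressed stream:

/* Hypothetical fragment: dsc_pbn was recomputed with DSC overhead
 * included; a negative return is an error, otherwise the slot count. */
int slots;

slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn,
                                     port->mgr->pbn_div, true);
if (slots < 0)
        return slots;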
5341
+/**
5342
+ * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5343
+ * atomic update is valid
5344
+ * @state: Pointer to the new &struct drm_dp_mst_topology_state
5345
+ *
5346
+ * Checks the given topology state for an atomic update to ensure that it's
5347
+ * valid. This includes checking whether there's enough bandwidth to support
5348
+ * the new VCPI allocations in the atomic update.
5349
+ *
5350
+ * Any atomic drivers supporting DP MST must make sure to call this after
5351
+ * checking the rest of their state in their
5352
+ * &drm_mode_config_funcs.atomic_check() callback.
5353
+ *
5354
+ * See also:
5355
+ * drm_dp_atomic_find_vcpi_slots()
5356
+ * drm_dp_atomic_release_vcpi_slots()
5357
+ *
5358
+ * Returns:
5359
+ *
5360
+ * 0 if the new state is valid, negative error code otherwise.
5361
+ */
5362
+int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5363
+{
5364
+ struct drm_dp_mst_topology_mgr *mgr;
5365
+ struct drm_dp_mst_topology_state *mst_state;
5366
+ int i, ret = 0;
5367
+
5368
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5369
+ if (!mgr->mst_state)
5370
+ continue;
5371
+
5372
+ ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5373
+ if (ret)
5374
+ break;
5375
+
5376
+ mutex_lock(&mgr->lock);
5377
+ ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5378
+ mst_state);
5379
+ mutex_unlock(&mgr->lock);
5380
+ if (ret < 0)
5381
+ break;
5382
+ else
5383
+ ret = 0;
5384
+ }
5385
+
5386
+ return ret;
5387
+}
5388
+EXPORT_SYMBOL(drm_dp_mst_atomic_check);
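
A hedged sketch of the recommended call site, a driver's &drm_mode_config_funcs.atomic_check implementation (the foo_* name is an assumption):

static int foo_mode_config_atomic_check(struct drm_device *dev,
                                        struct drm_atomic_state *state)
{
        int ret;

        /* Run the core/helper checks first... */
        ret = drm_atomic_helper_check(dev, state);
        if (ret)
                return ret;

        /* ...then validate VCPI slots and branch bandwidth for all
         * MST managers touched by this state. */
        return drm_dp_mst_atomic_check(state);
}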
5389
+
5390
+const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
32115391 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
32125392 .atomic_destroy_state = drm_dp_mst_destroy_state,
32135393 };
5394
+EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
32145395
32155396 /**
32165397 * drm_atomic_get_mst_topology_state: get MST topology state
....@@ -3230,9 +5411,6 @@
32305411 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
32315412 struct drm_dp_mst_topology_mgr *mgr)
32325413 {
3233
- struct drm_device *dev = mgr->dev;
3234
-
3235
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
32365414 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
32375415 }
32385416 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
....@@ -3258,12 +5436,29 @@
32585436 mutex_init(&mgr->lock);
32595437 mutex_init(&mgr->qlock);
32605438 mutex_init(&mgr->payload_lock);
3261
- mutex_init(&mgr->destroy_connector_lock);
5439
+ mutex_init(&mgr->delayed_destroy_lock);
5440
+ mutex_init(&mgr->up_req_lock);
5441
+ mutex_init(&mgr->probe_lock);
5442
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5443
+ mutex_init(&mgr->topology_ref_history_lock);
5444
+#endif
32625445 INIT_LIST_HEAD(&mgr->tx_msg_downq);
3263
- INIT_LIST_HEAD(&mgr->destroy_connector_list);
5446
+ INIT_LIST_HEAD(&mgr->destroy_port_list);
5447
+ INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5448
+ INIT_LIST_HEAD(&mgr->up_req_list);
5449
+
5450
+ /*
5451
+ * delayed_destroy_work will be queued on a dedicated WQ, so that any
5452
+ * requeuing will also be flushed when deinitializing the topology manager.
5453
+ */
5454
+ mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5455
+ if (mgr->delayed_destroy_wq == NULL)
5456
+ return -ENOMEM;
5457
+
32645458 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
32655459 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
3266
- INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
5460
+ INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5461
+ INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
32675462 init_waitqueue_head(&mgr->tx_waitq);
32685463 mgr->dev = dev;
32695464 mgr->aux = aux;
....@@ -3280,21 +5475,17 @@
32805475 if (!mgr->proposed_vcpis)
32815476 return -ENOMEM;
32825477 set_bit(0, &mgr->payload_mask);
3283
- if (test_calc_pbn_mode() < 0)
3284
- DRM_ERROR("MST PBN self-test failed\n");
32855478
32865479 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
32875480 if (mst_state == NULL)
32885481 return -ENOMEM;
32895482
32905483 mst_state->mgr = mgr;
5484
+ INIT_LIST_HEAD(&mst_state->vcpis);
32915485
3292
- /* max. time slots - one slot for MTP header */
3293
- mst_state->avail_slots = 63;
3294
-
3295
- drm_atomic_private_obj_init(&mgr->base,
5486
+ drm_atomic_private_obj_init(dev, &mgr->base,
32965487 &mst_state->base,
3297
- &mst_state_funcs);
5488
+ &drm_dp_mst_topology_state_funcs);
32985489
32995490 return 0;
33005491 }
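
The dedicated ordered workqueue matters because delayed_destroy_work can requeue itself; destroy_workqueue() on a private WQ drains requeued work too, which a bare flush_work() could miss. A generic sketch of the pattern, with hypothetical foo_* names:

struct foo {
        struct workqueue_struct *wq;
        struct work_struct work;
};

static void foo_work_fn(struct work_struct *work)
{
        /* ... may queue_work() itself on foo->wq again ... */
}

static int foo_init(struct foo *foo)
{
        foo->wq = alloc_ordered_workqueue("foo_wq", 0);
        if (!foo->wq)
                return -ENOMEM;
        INIT_WORK(&foo->work, foo_work_fn);
        return 0;
}

static void foo_fini(struct foo *foo)
{
        /* Drains queued and requeued instances of foo->work. */
        destroy_workqueue(foo->wq);
        foo->wq = NULL;
}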
....@@ -3306,8 +5497,13 @@
33065497 */
33075498 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
33085499 {
5500
+ drm_dp_mst_topology_mgr_set_mst(mgr, false);
33095501 flush_work(&mgr->work);
3310
- flush_work(&mgr->destroy_connector_work);
5502
+ /* The following will also drain any requeued work on the WQ. */
5503
+ if (mgr->delayed_destroy_wq) {
5504
+ destroy_workqueue(mgr->delayed_destroy_wq);
5505
+ mgr->delayed_destroy_wq = NULL;
5506
+ }
33115507 mutex_lock(&mgr->payload_lock);
33125508 kfree(mgr->payloads);
33135509 mgr->payloads = NULL;
....@@ -3318,37 +5514,58 @@
33185514 mgr->aux = NULL;
33195515 drm_atomic_private_obj_fini(&mgr->base);
33205516 mgr->funcs = NULL;
5517
+
5518
+ mutex_destroy(&mgr->delayed_destroy_lock);
5519
+ mutex_destroy(&mgr->payload_lock);
5520
+ mutex_destroy(&mgr->qlock);
5521
+ mutex_destroy(&mgr->lock);
5522
+ mutex_destroy(&mgr->up_req_lock);
5523
+ mutex_destroy(&mgr->probe_lock);
5524
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5525
+ mutex_destroy(&mgr->topology_ref_history_lock);
5526
+#endif
33215527 }
33225528 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
33235529
3324
-/* I2C device */
3325
-static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
3326
- int num)
5530
+static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
33275531 {
3328
- struct drm_dp_aux *aux = adapter->algo_data;
3329
- struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
3330
- struct drm_dp_mst_branch *mstb;
5532
+ int i;
5533
+
5534
+ if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5535
+ return false;
5536
+
5537
+ for (i = 0; i < num - 1; i++) {
5538
+ if (msgs[i].flags & I2C_M_RD ||
5539
+ msgs[i].len > 0xff)
5540
+ return false;
5541
+ }
5542
+
5543
+ return msgs[num - 1].flags & I2C_M_RD &&
5544
+ msgs[num - 1].len <= 0xff;
5545
+}
5546
+
5547
+static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
5548
+{
5549
+ int i;
5550
+
5551
+ for (i = 0; i < num - 1; i++) {
5552
+ if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
5553
+ msgs[i].len > 0xff)
5554
+ return false;
5555
+ }
5556
+
5557
+ return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
5558
+}
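
For reference, the transfer shape each predicate accepts: a typical DDC/EDID fetch, one sub-address write (up to DP_REMOTE_I2C_READ_MAX_TRANSACTIONS of them) followed by a read, passes remote_i2c_read_ok(). The buffers here are illustrative:

/* Illustrative msgs[] shape: all but the last are short writes and
 * the last is a read, which is what remote_i2c_read_ok() requires. */
u8 offset = 0;
u8 edid[128];
struct i2c_msg msgs[] = {
        { .addr = 0x50, .flags = 0,        .len = 1,   .buf = &offset },
        { .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
};

remote_i2c_write_ok() is the mirror image: every message must be a write, and all but the last must carry I2C_M_STOP.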
5559
+
5560
+static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
5561
+ struct drm_dp_mst_port *port,
5562
+ struct i2c_msg *msgs, int num)
5563
+{
33315564 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
33325565 unsigned int i;
3333
- bool reading = false;
33345566 struct drm_dp_sideband_msg_req_body msg;
33355567 struct drm_dp_sideband_msg_tx *txmsg = NULL;
33365568 int ret;
3337
-
3338
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3339
- if (!mstb)
3340
- return -EREMOTEIO;
3341
-
3342
- /* construct i2c msg */
3343
- /* see if last msg is a read */
3344
- if (msgs[num - 1].flags & I2C_M_RD)
3345
- reading = true;
3346
-
3347
- if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
3348
- DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
3349
- ret = -EIO;
3350
- goto out;
3351
- }
33525569
33535570 memset(&msg, 0, sizeof(msg));
33545571 msg.req_type = DP_REMOTE_I2C_READ;
....@@ -3377,7 +5594,7 @@
33775594 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
33785595 if (ret > 0) {
33795596
3380
- if (txmsg->reply.reply_type == 1) { /* got a NAK back */
5597
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
33815598 ret = -EREMOTEIO;
33825599 goto out;
33835600 }
....@@ -3390,7 +5607,79 @@
33905607 }
33915608 out:
33925609 kfree(txmsg);
3393
- drm_dp_put_mst_branch_device(mstb);
5610
+ return ret;
5611
+}
5612
+
5613
+static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
5614
+ struct drm_dp_mst_port *port,
5615
+ struct i2c_msg *msgs, int num)
5616
+{
5617
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5618
+ unsigned int i;
5619
+ struct drm_dp_sideband_msg_req_body msg;
5620
+ struct drm_dp_sideband_msg_tx *txmsg = NULL;
5621
+ int ret;
5622
+
5623
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5624
+ if (!txmsg) {
5625
+ ret = -ENOMEM;
5626
+ goto out;
5627
+ }
5628
+ for (i = 0; i < num; i++) {
5629
+ memset(&msg, 0, sizeof(msg));
5630
+ msg.req_type = DP_REMOTE_I2C_WRITE;
5631
+ msg.u.i2c_write.port_number = port->port_num;
5632
+ msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
5633
+ msg.u.i2c_write.num_bytes = msgs[i].len;
5634
+ msg.u.i2c_write.bytes = msgs[i].buf;
5635
+
5636
+ memset(txmsg, 0, sizeof(*txmsg));
5637
+ txmsg->dst = mstb;
5638
+
5639
+ drm_dp_encode_sideband_req(&msg, txmsg);
5640
+ drm_dp_queue_down_tx(mgr, txmsg);
5641
+
5642
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5643
+ if (ret > 0) {
5644
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5645
+ ret = -EREMOTEIO;
5646
+ goto out;
5647
+ }
5648
+ } else {
5649
+ goto out;
5650
+ }
5651
+ }
5652
+ ret = num;
5653
+out:
5654
+ kfree(txmsg);
5655
+ return ret;
5656
+}
5657
+
5658
+/* I2C device */
5659
+static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
5660
+ struct i2c_msg *msgs, int num)
5661
+{
5662
+ struct drm_dp_aux *aux = adapter->algo_data;
5663
+ struct drm_dp_mst_port *port =
5664
+ container_of(aux, struct drm_dp_mst_port, aux);
5665
+ struct drm_dp_mst_branch *mstb;
5666
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5667
+ int ret;
5668
+
5669
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5670
+ if (!mstb)
5671
+ return -EREMOTEIO;
5672
+
5673
+ if (remote_i2c_read_ok(msgs, num)) {
5674
+ ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
5675
+ } else if (remote_i2c_write_ok(msgs, num)) {
5676
+ ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
5677
+ } else {
5678
+ DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
5679
+ ret = -EIO;
5680
+ }
5681
+
5682
+ drm_dp_mst_topology_put_mstb(mstb);
33945683 return ret;
33955684 }
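
Once the per-port adapter is registered (see drm_dp_mst_register_i2c_bus() below), sideband I2C is reachable through the normal kernel I2C API; e.g. this hedged fragment fetching an MST sink's EDID ends up in drm_dp_mst_i2c_xfer():

/* Hypothetical fragment: port is a drm_dp_mst_port and connector its
 * DRM connector; the read is routed over sideband transparently. */
struct edid *edid = drm_get_edid(connector, &port->aux.ddc);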
33965685
....@@ -3409,22 +5698,26 @@
34095698
34105699 /**
34115700 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
3412
- * @aux: DisplayPort AUX channel
5701
+ * @port: The port to add the I2C bus on
34135702 *
34145703 * Returns 0 on success or a negative error code on failure.
34155704 */
3416
-static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
5705
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
34175706 {
5707
+ struct drm_dp_aux *aux = &port->aux;
5708
+ struct device *parent_dev = port->mgr->dev->dev;
5709
+
34185710 aux->ddc.algo = &drm_dp_mst_i2c_algo;
34195711 aux->ddc.algo_data = aux;
34205712 aux->ddc.retries = 3;
34215713
34225714 aux->ddc.class = I2C_CLASS_DDC;
34235715 aux->ddc.owner = THIS_MODULE;
3424
- aux->ddc.dev.parent = aux->dev;
3425
- aux->ddc.dev.of_node = aux->dev->of_node;
5716
+ /* FIXME: set the kdev of the port's connector as parent */
5717
+ aux->ddc.dev.parent = parent_dev;
5718
+ aux->ddc.dev.of_node = parent_dev->of_node;
34265719
3427
- strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
5720
+ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
34285721 sizeof(aux->ddc.name));
34295722
34305723 return i2c_add_adapter(&aux->ddc);
....@@ -3432,9 +5725,180 @@
34325725
34335726 /**
34345727 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
3435
- * @aux: DisplayPort AUX channel
5728
+ * @port: The port to remove the I2C bus from
34365729 */
3437
-static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
5730
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
34385731 {
3439
- i2c_del_adapter(&aux->ddc);
5732
+ i2c_del_adapter(&port->aux.ddc);
34405733 }
5734
+
5735
+/**
5736
+ * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5737
+ * @port: The port to check
5738
+ *
5739
+ * A single physical MST hub object can be represented in the topology
5740
+ * by multiple branches, with virtual ports between those branches.
5741
+ *
5742
+ * As of DP 1.4, an MST hub with internal (virtual) ports must expose
5743
+ * certain DPCD registers over those ports. See sections 2.6.1.1.1
5744
+ * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
5745
+ *
5746
+ * May acquire mgr->lock
5747
+ *
5748
+ * Returns:
5749
+ * true if the port is a virtual DP peer device, false otherwise
5750
+ */
5751
+static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5752
+{
5753
+ struct drm_dp_mst_port *downstream_port;
5754
+
5755
+ if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5756
+ return false;
5757
+
5758
+ /* Virtual DP Sink (Internal Display Panel) */
5759
+ if (port->port_num >= 8)
5760
+ return true;
5761
+
5762
+ /* DP-to-HDMI Protocol Converter */
5763
+ if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5764
+ !port->mcs &&
5765
+ port->ldps)
5766
+ return true;
5767
+
5768
+ /* DP-to-DP */
5769
+ mutex_lock(&port->mgr->lock);
5770
+ if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5771
+ port->mstb &&
5772
+ port->mstb->num_ports == 2) {
5773
+ list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5774
+ if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5775
+ !downstream_port->input) {
5776
+ mutex_unlock(&port->mgr->lock);
5777
+ return true;
5778
+ }
5779
+ }
5780
+ }
5781
+ mutex_unlock(&port->mgr->lock);
5782
+
5783
+ return false;
5784
+}
5785
+
5786
+/**
5787
+ * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
5788
+ * @port: The port to check. A leaf of the MST tree with an attached display.
5789
+ *
5790
+ * Depending on the situation, DSC may be enabled via the endpoint aux,
5791
+ * the immediately upstream aux, or the connector's physical aux.
5792
+ *
5793
+ * This is both the correct aux to read DSC_CAPABILITY and the
5794
+ * correct aux to write DSC_ENABLED.
5795
+ *
5796
+ * This operation can be expensive (up to four aux reads), so
5797
+ * the caller should cache the return value.
5798
+ *
5799
+ * Returns:
5800
+ * NULL if DSC cannot be enabled on this port, otherwise the aux device
5801
+ */
5802
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5803
+{
5804
+ struct drm_dp_mst_port *immediate_upstream_port;
5805
+ struct drm_dp_mst_port *fec_port;
5806
+ struct drm_dp_desc desc = {};
5807
+ u8 endpoint_fec;
5808
+ u8 endpoint_dsc;
5809
+
5810
+ if (!port)
5811
+ return NULL;
5812
+
5813
+ if (port->parent->port_parent)
5814
+ immediate_upstream_port = port->parent->port_parent;
5815
+ else
5816
+ immediate_upstream_port = NULL;
5817
+
5818
+ fec_port = immediate_upstream_port;
5819
+ while (fec_port) {
5820
+ /*
5821
+ * Each physical link (i.e. not a virtual port) between the
5822
+ * output and the primary device must support FEC
5823
+ */
5824
+ if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5825
+ !fec_port->fec_capable)
5826
+ return NULL;
5827
+
5828
+ fec_port = fec_port->parent->port_parent;
5829
+ }
5830
+
5831
+ /* DP-to-DP peer device */
5832
+ if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5833
+ u8 upstream_dsc;
5834
+
5835
+ if (drm_dp_dpcd_read(&port->aux,
5836
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5837
+ return NULL;
5838
+ if (drm_dp_dpcd_read(&port->aux,
5839
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5840
+ return NULL;
5841
+ if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5842
+ DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
5843
+ return NULL;
5844
+
5845
+ /* Endpoint decompression with DP-to-DP peer device */
5846
+ if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5847
+ (endpoint_fec & DP_FEC_CAPABLE) &&
5848
+ (upstream_dsc & 0x2) /* DSC passthrough */)
5849
+ return &port->aux;
5850
+
5851
+ /* Virtual DPCD decompression with DP-to-DP peer device */
5852
+ return &immediate_upstream_port->aux;
5853
+ }
5854
+
5855
+ /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
5856
+ if (drm_dp_mst_is_virtual_dpcd(port))
5857
+ return &port->aux;
5858
+
5859
+ /*
5860
+ * Synaptics quirk
5861
+ * Applies to ports for which:
5862
+ * - Physical aux has Synaptics OUI
5863
+ * - DPv1.4 or higher
5864
+ * - Port is on primary branch device
5865
+ * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
5866
+ */
5867
+ if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5868
+ return NULL;
5869
+
5870
+ if (drm_dp_has_quirk(&desc, 0,
5871
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5872
+ port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5873
+ port->parent == port->mgr->mst_primary) {
5874
+ u8 downstreamport;
5875
+
5876
+ if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
5877
+ &downstreamport, 1) < 0)
5878
+ return NULL;
5879
+
5880
+ if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
5881
+ ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
5882
+ != DP_DWN_STRM_PORT_TYPE_ANALOG))
5883
+ return port->mgr->aux;
5884
+ }
5885
+
5886
+ /*
5887
+ * The check below verifies if the MST sink
5888
+ * connected to the GPU is capable of DSC -
5889
+ * therefore the endpoint needs to be
5890
+ * both DSC and FEC capable.
5891
+ */
5892
+ if (drm_dp_dpcd_read(&port->aux,
5893
+ DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5894
+ return NULL;
5895
+ if (drm_dp_dpcd_read(&port->aux,
5896
+ DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5897
+ return NULL;
5898
+ if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5899
+ (endpoint_fec & DP_FEC_CAPABLE))
5900
+ return &port->aux;
5901
+
5902
+ return NULL;
5903
+}
5904
+EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
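
A hedged usage sketch: resolve the aux once (e.g. at detect time), cache it, and write DSC enablement through it later; conn and its dsc_aux field are hypothetical driver state:

/* Hypothetical driver code: cache the result, then write DP_DSC_ENABLE
 * through whichever aux was selected. */
conn->dsc_aux = drm_dp_mst_dsc_aux_for_port(conn->port);
if (conn->dsc_aux)
        drm_dp_dpcd_writeb(conn->dsc_aux, DP_DSC_ENABLE, 1);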