hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/drm_dp_mst_topology.c
....@@ -20,20 +20,33 @@
2020 * OF THIS SOFTWARE.
2121 */
2222
23
-#include <linux/kernel.h>
23
+#include <linux/bitfield.h>
2424 #include <linux/delay.h>
25
-#include <linux/init.h>
2625 #include <linux/errno.h>
26
+#include <linux/i2c.h>
27
+#include <linux/init.h>
28
+#include <linux/kernel.h>
29
+#include <linux/random.h>
2730 #include <linux/sched.h>
2831 #include <linux/seq_file.h>
29
-#include <linux/i2c.h>
30
-#include <drm/drm_dp_mst_helper.h>
31
-#include <drm/drmP.h>
3232 #include <linux/iopoll.h>
3333
34
-#include <drm/drm_fixed.h>
34
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
35
+#include <linux/stacktrace.h>
36
+#include <linux/sort.h>
37
+#include <linux/timekeeping.h>
38
+#include <linux/math64.h>
39
+#endif
40
+
3541 #include <drm/drm_atomic.h>
3642 #include <drm/drm_atomic_helper.h>
43
+#include <drm/drm_dp_mst_helper.h>
44
+#include <drm/drm_drv.h>
45
+#include <drm/drm_print.h>
46
+#include <drm/drm_probe_helper.h>
47
+
48
+#include "drm_crtc_helper_internal.h"
49
+#include "drm_dp_mst_topology_internal.h"
3750
3851 /**
3952 * DOC: dp mst helper
....@@ -42,27 +55,144 @@
4255 * protocol. The helpers contain a topology manager and bandwidth manager.
4356 * The helpers encapsulate the sending and received of sideband msgs.
4457 */
58
+struct drm_dp_pending_up_req {
59
+ struct drm_dp_sideband_msg_hdr hdr;
60
+ struct drm_dp_sideband_msg_req_body msg;
61
+ struct list_head next;
62
+};
63
+
4564 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4665 char *buf);
47
-static int test_calc_pbn_mode(void);
4866
49
-static void drm_dp_put_port(struct drm_dp_mst_port *port);
67
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
5068
5169 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
5270 int id,
5371 struct drm_dp_payload *payload);
5472
55
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
56
- struct drm_dp_mst_branch *mstb);
73
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
74
+ struct drm_dp_mst_port *port,
75
+ int offset, int size, u8 *bytes);
76
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
77
+ struct drm_dp_mst_port *port,
78
+ int offset, int size, u8 *bytes);
79
+
80
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
81
+ struct drm_dp_mst_branch *mstb);
82
+
83
+static void
84
+drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
85
+ struct drm_dp_mst_branch *mstb);
86
+
5787 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
5888 struct drm_dp_mst_branch *mstb,
5989 struct drm_dp_mst_port *port);
6090 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
6191 u8 *guid);
6292
63
-static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
64
-static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
93
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
94
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
6595 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
96
+
97
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
98
+ struct drm_dp_mst_branch *branch);
99
+
100
+#define DBG_PREFIX "[dp_mst]"
101
+
102
+#define DP_STR(x) [DP_ ## x] = #x
103
+
104
+static const char *drm_dp_mst_req_type_str(u8 req_type)
105
+{
106
+ static const char * const req_type_str[] = {
107
+ DP_STR(GET_MSG_TRANSACTION_VERSION),
108
+ DP_STR(LINK_ADDRESS),
109
+ DP_STR(CONNECTION_STATUS_NOTIFY),
110
+ DP_STR(ENUM_PATH_RESOURCES),
111
+ DP_STR(ALLOCATE_PAYLOAD),
112
+ DP_STR(QUERY_PAYLOAD),
113
+ DP_STR(RESOURCE_STATUS_NOTIFY),
114
+ DP_STR(CLEAR_PAYLOAD_ID_TABLE),
115
+ DP_STR(REMOTE_DPCD_READ),
116
+ DP_STR(REMOTE_DPCD_WRITE),
117
+ DP_STR(REMOTE_I2C_READ),
118
+ DP_STR(REMOTE_I2C_WRITE),
119
+ DP_STR(POWER_UP_PHY),
120
+ DP_STR(POWER_DOWN_PHY),
121
+ DP_STR(SINK_EVENT_NOTIFY),
122
+ DP_STR(QUERY_STREAM_ENC_STATUS),
123
+ };
124
+
125
+ if (req_type >= ARRAY_SIZE(req_type_str) ||
126
+ !req_type_str[req_type])
127
+ return "unknown";
128
+
129
+ return req_type_str[req_type];
130
+}
131
+
132
+#undef DP_STR
133
+#define DP_STR(x) [DP_NAK_ ## x] = #x
134
+
135
+static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
136
+{
137
+ static const char * const nak_reason_str[] = {
138
+ DP_STR(WRITE_FAILURE),
139
+ DP_STR(INVALID_READ),
140
+ DP_STR(CRC_FAILURE),
141
+ DP_STR(BAD_PARAM),
142
+ DP_STR(DEFER),
143
+ DP_STR(LINK_FAILURE),
144
+ DP_STR(NO_RESOURCES),
145
+ DP_STR(DPCD_FAIL),
146
+ DP_STR(I2C_NAK),
147
+ DP_STR(ALLOCATE_FAIL),
148
+ };
149
+
150
+ if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
151
+ !nak_reason_str[nak_reason])
152
+ return "unknown";
153
+
154
+ return nak_reason_str[nak_reason];
155
+}
156
+
157
+#undef DP_STR
158
+#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
159
+
160
+static const char *drm_dp_mst_sideband_tx_state_str(int state)
161
+{
162
+ static const char * const sideband_reason_str[] = {
163
+ DP_STR(QUEUED),
164
+ DP_STR(START_SEND),
165
+ DP_STR(SENT),
166
+ DP_STR(RX),
167
+ DP_STR(TIMEOUT),
168
+ };
169
+
170
+ if (state >= ARRAY_SIZE(sideband_reason_str) ||
171
+ !sideband_reason_str[state])
172
+ return "unknown";
173
+
174
+ return sideband_reason_str[state];
175
+}
176
+
177
+static int
178
+drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
179
+{
180
+ int i;
181
+ u8 unpacked_rad[16];
182
+
183
+ for (i = 0; i < lct; i++) {
184
+ if (i % 2)
185
+ unpacked_rad[i] = rad[i / 2] >> 4;
186
+ else
187
+ unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
188
+ }
189
+
190
+ /* TODO: Eventually add something to printk so we can format the rad
191
+ * like this: 1.2.3
192
+ */
193
+ return snprintf(out, len, "%*phC", lct, unpacked_rad);
194
+}
195
+
66196 /* sideband msg handling */
67197 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
68198 {
....@@ -134,6 +264,7 @@
134264 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
135265 {
136266 u8 size = 3;
267
+
137268 size += (hdr->lct / 2);
138269 return size;
139270 }
....@@ -144,6 +275,7 @@
144275 int idx = 0;
145276 int i;
146277 u8 crc4;
278
+
147279 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
148280 for (i = 0; i < (hdr->lct / 2); i++)
149281 buf[idx++] = hdr->rad[i];
....@@ -164,6 +296,7 @@
164296 u8 len;
165297 int i;
166298 u8 idx;
299
+
167300 if (buf[0] == 0)
168301 return false;
169302 len = 3;
....@@ -194,16 +327,20 @@
194327 return true;
195328 }
196329
197
-static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
198
- struct drm_dp_sideband_msg_tx *raw)
330
+void
331
+drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
332
+ struct drm_dp_sideband_msg_tx *raw)
199333 {
200334 int idx = 0;
201335 int i;
202336 u8 *buf = raw->msg;
337
+
203338 buf[idx++] = req->req_type & 0x7f;
204339
205340 switch (req->req_type) {
206341 case DP_ENUM_PATH_RESOURCES:
342
+ case DP_POWER_DOWN_PHY:
343
+ case DP_POWER_UP_PHY:
207344 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
208345 idx++;
209346 break;
....@@ -291,19 +428,301 @@
291428 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
292429 idx += req->u.i2c_write.num_bytes;
293430 break;
431
+ case DP_QUERY_STREAM_ENC_STATUS: {
432
+ const struct drm_dp_query_stream_enc_status *msg;
294433
295
- case DP_POWER_DOWN_PHY:
296
- case DP_POWER_UP_PHY:
297
- buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
434
+ msg = &req->u.enc_status;
435
+ buf[idx] = msg->stream_id;
298436 idx++;
437
+ memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
438
+ idx += sizeof(msg->client_id);
439
+ buf[idx] = 0;
440
+ buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
441
+ buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
442
+ buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
443
+ buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
444
+ idx++;
445
+ }
299446 break;
300447 }
301448 raw->cur_len = idx;
449
+}
450
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
451
+
452
+/* Decode a sideband request we've encoded, mainly used for debugging */
453
+int
454
+drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
455
+ struct drm_dp_sideband_msg_req_body *req)
456
+{
457
+ const u8 *buf = raw->msg;
458
+ int i, idx = 0;
459
+
460
+ req->req_type = buf[idx++] & 0x7f;
461
+ switch (req->req_type) {
462
+ case DP_ENUM_PATH_RESOURCES:
463
+ case DP_POWER_DOWN_PHY:
464
+ case DP_POWER_UP_PHY:
465
+ req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
466
+ break;
467
+ case DP_ALLOCATE_PAYLOAD:
468
+ {
469
+ struct drm_dp_allocate_payload *a =
470
+ &req->u.allocate_payload;
471
+
472
+ a->number_sdp_streams = buf[idx] & 0xf;
473
+ a->port_number = (buf[idx] >> 4) & 0xf;
474
+
475
+ WARN_ON(buf[++idx] & 0x80);
476
+ a->vcpi = buf[idx] & 0x7f;
477
+
478
+ a->pbn = buf[++idx] << 8;
479
+ a->pbn |= buf[++idx];
480
+
481
+ idx++;
482
+ for (i = 0; i < a->number_sdp_streams; i++) {
483
+ a->sdp_stream_sink[i] =
484
+ (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
485
+ }
486
+ }
487
+ break;
488
+ case DP_QUERY_PAYLOAD:
489
+ req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
490
+ WARN_ON(buf[++idx] & 0x80);
491
+ req->u.query_payload.vcpi = buf[idx] & 0x7f;
492
+ break;
493
+ case DP_REMOTE_DPCD_READ:
494
+ {
495
+ struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
496
+
497
+ r->port_number = (buf[idx] >> 4) & 0xf;
498
+
499
+ r->dpcd_address = (buf[idx] << 16) & 0xf0000;
500
+ r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
501
+ r->dpcd_address |= buf[++idx] & 0xff;
502
+
503
+ r->num_bytes = buf[++idx];
504
+ }
505
+ break;
506
+ case DP_REMOTE_DPCD_WRITE:
507
+ {
508
+ struct drm_dp_remote_dpcd_write *w =
509
+ &req->u.dpcd_write;
510
+
511
+ w->port_number = (buf[idx] >> 4) & 0xf;
512
+
513
+ w->dpcd_address = (buf[idx] << 16) & 0xf0000;
514
+ w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
515
+ w->dpcd_address |= buf[++idx] & 0xff;
516
+
517
+ w->num_bytes = buf[++idx];
518
+
519
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
520
+ GFP_KERNEL);
521
+ if (!w->bytes)
522
+ return -ENOMEM;
523
+ }
524
+ break;
525
+ case DP_REMOTE_I2C_READ:
526
+ {
527
+ struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
528
+ struct drm_dp_remote_i2c_read_tx *tx;
529
+ bool failed = false;
530
+
531
+ r->num_transactions = buf[idx] & 0x3;
532
+ r->port_number = (buf[idx] >> 4) & 0xf;
533
+ for (i = 0; i < r->num_transactions; i++) {
534
+ tx = &r->transactions[i];
535
+
536
+ tx->i2c_dev_id = buf[++idx] & 0x7f;
537
+ tx->num_bytes = buf[++idx];
538
+ tx->bytes = kmemdup(&buf[++idx],
539
+ tx->num_bytes,
540
+ GFP_KERNEL);
541
+ if (!tx->bytes) {
542
+ failed = true;
543
+ break;
544
+ }
545
+ idx += tx->num_bytes;
546
+ tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
547
+ tx->i2c_transaction_delay = buf[idx] & 0xf;
548
+ }
549
+
550
+ if (failed) {
551
+ for (i = 0; i < r->num_transactions; i++) {
552
+ tx = &r->transactions[i];
553
+ kfree(tx->bytes);
554
+ }
555
+ return -ENOMEM;
556
+ }
557
+
558
+ r->read_i2c_device_id = buf[++idx] & 0x7f;
559
+ r->num_bytes_read = buf[++idx];
560
+ }
561
+ break;
562
+ case DP_REMOTE_I2C_WRITE:
563
+ {
564
+ struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
565
+
566
+ w->port_number = (buf[idx] >> 4) & 0xf;
567
+ w->write_i2c_device_id = buf[++idx] & 0x7f;
568
+ w->num_bytes = buf[++idx];
569
+ w->bytes = kmemdup(&buf[++idx], w->num_bytes,
570
+ GFP_KERNEL);
571
+ if (!w->bytes)
572
+ return -ENOMEM;
573
+ }
574
+ break;
575
+ case DP_QUERY_STREAM_ENC_STATUS:
576
+ req->u.enc_status.stream_id = buf[idx++];
577
+ for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
578
+ req->u.enc_status.client_id[i] = buf[idx++];
579
+
580
+ req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
581
+ buf[idx]);
582
+ req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
583
+ buf[idx]);
584
+ req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
585
+ buf[idx]);
586
+ req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
587
+ buf[idx]);
588
+ break;
589
+ }
590
+
591
+ return 0;
592
+}
593
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
594
+
595
+void
596
+drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
597
+ int indent, struct drm_printer *printer)
598
+{
599
+ int i;
600
+
601
+#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
602
+ if (req->req_type == DP_LINK_ADDRESS) {
603
+ /* No contents to print */
604
+ P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
605
+ return;
606
+ }
607
+
608
+ P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
609
+ indent++;
610
+
611
+ switch (req->req_type) {
612
+ case DP_ENUM_PATH_RESOURCES:
613
+ case DP_POWER_DOWN_PHY:
614
+ case DP_POWER_UP_PHY:
615
+ P("port=%d\n", req->u.port_num.port_number);
616
+ break;
617
+ case DP_ALLOCATE_PAYLOAD:
618
+ P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
619
+ req->u.allocate_payload.port_number,
620
+ req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
621
+ req->u.allocate_payload.number_sdp_streams,
622
+ req->u.allocate_payload.number_sdp_streams,
623
+ req->u.allocate_payload.sdp_stream_sink);
624
+ break;
625
+ case DP_QUERY_PAYLOAD:
626
+ P("port=%d vcpi=%d\n",
627
+ req->u.query_payload.port_number,
628
+ req->u.query_payload.vcpi);
629
+ break;
630
+ case DP_REMOTE_DPCD_READ:
631
+ P("port=%d dpcd_addr=%05x len=%d\n",
632
+ req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
633
+ req->u.dpcd_read.num_bytes);
634
+ break;
635
+ case DP_REMOTE_DPCD_WRITE:
636
+ P("port=%d addr=%05x len=%d: %*ph\n",
637
+ req->u.dpcd_write.port_number,
638
+ req->u.dpcd_write.dpcd_address,
639
+ req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
640
+ req->u.dpcd_write.bytes);
641
+ break;
642
+ case DP_REMOTE_I2C_READ:
643
+ P("port=%d num_tx=%d id=%d size=%d:\n",
644
+ req->u.i2c_read.port_number,
645
+ req->u.i2c_read.num_transactions,
646
+ req->u.i2c_read.read_i2c_device_id,
647
+ req->u.i2c_read.num_bytes_read);
648
+
649
+ indent++;
650
+ for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
651
+ const struct drm_dp_remote_i2c_read_tx *rtx =
652
+ &req->u.i2c_read.transactions[i];
653
+
654
+ P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
655
+ i, rtx->i2c_dev_id, rtx->num_bytes,
656
+ rtx->no_stop_bit, rtx->i2c_transaction_delay,
657
+ rtx->num_bytes, rtx->bytes);
658
+ }
659
+ break;
660
+ case DP_REMOTE_I2C_WRITE:
661
+ P("port=%d id=%d size=%d: %*ph\n",
662
+ req->u.i2c_write.port_number,
663
+ req->u.i2c_write.write_i2c_device_id,
664
+ req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
665
+ req->u.i2c_write.bytes);
666
+ break;
667
+ case DP_QUERY_STREAM_ENC_STATUS:
668
+ P("stream_id=%u client_id=%*ph stream_event=%x "
669
+ "valid_event=%d stream_behavior=%x valid_behavior=%d",
670
+ req->u.enc_status.stream_id,
671
+ (int)ARRAY_SIZE(req->u.enc_status.client_id),
672
+ req->u.enc_status.client_id, req->u.enc_status.stream_event,
673
+ req->u.enc_status.valid_stream_event,
674
+ req->u.enc_status.stream_behavior,
675
+ req->u.enc_status.valid_stream_behavior);
676
+ break;
677
+ default:
678
+ P("???\n");
679
+ break;
680
+ }
681
+#undef P
682
+}
683
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
684
+
685
+static inline void
686
+drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
687
+ const struct drm_dp_sideband_msg_tx *txmsg)
688
+{
689
+ struct drm_dp_sideband_msg_req_body req;
690
+ char buf[64];
691
+ int ret;
692
+ int i;
693
+
694
+ drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
695
+ sizeof(buf));
696
+ drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
697
+ txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
698
+ drm_dp_mst_sideband_tx_state_str(txmsg->state),
699
+ txmsg->path_msg, buf);
700
+
701
+ ret = drm_dp_decode_sideband_req(txmsg, &req);
702
+ if (ret) {
703
+ drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
704
+ return;
705
+ }
706
+ drm_dp_dump_sideband_msg_req_body(&req, 1, p);
707
+
708
+ switch (req.req_type) {
709
+ case DP_REMOTE_DPCD_WRITE:
710
+ kfree(req.u.dpcd_write.bytes);
711
+ break;
712
+ case DP_REMOTE_I2C_READ:
713
+ for (i = 0; i < req.u.i2c_read.num_transactions; i++)
714
+ kfree(req.u.i2c_read.transactions[i].bytes);
715
+ break;
716
+ case DP_REMOTE_I2C_WRITE:
717
+ kfree(req.u.i2c_write.bytes);
718
+ break;
719
+ }
302720 }
303721
304722 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
305723 {
306724 u8 crc4;
725
+
307726 crc4 = drm_dp_msg_data_crc4(msg, len);
308727 msg[len] = crc4;
309728 }
....@@ -319,55 +738,53 @@
319738 raw->cur_len = idx;
320739 }
321740
322
-/* this adds a chunk of msg to the builder to get the final msg */
323
-static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
324
- u8 *replybuf, u8 replybuflen, bool hdr)
741
+static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
742
+ struct drm_dp_sideband_msg_hdr *hdr,
743
+ u8 hdrlen)
325744 {
326
- int ret;
745
+ /*
746
+ * ignore out-of-order messages or messages that are part of a
747
+ * failed transaction
748
+ */
749
+ if (!hdr->somt && !msg->have_somt)
750
+ return false;
751
+
752
+ /* get length contained in this portion */
753
+ msg->curchunk_idx = 0;
754
+ msg->curchunk_len = hdr->msg_len;
755
+ msg->curchunk_hdrlen = hdrlen;
756
+
757
+ /* we have already gotten an somt - don't bother parsing */
758
+ if (hdr->somt && msg->have_somt)
759
+ return false;
760
+
761
+ if (hdr->somt) {
762
+ memcpy(&msg->initial_hdr, hdr,
763
+ sizeof(struct drm_dp_sideband_msg_hdr));
764
+ msg->have_somt = true;
765
+ }
766
+ if (hdr->eomt)
767
+ msg->have_eomt = true;
768
+
769
+ return true;
770
+}
771
+
772
+/* this adds a chunk of msg to the builder to get the final msg */
773
+static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
774
+ u8 *replybuf, u8 replybuflen)
775
+{
327776 u8 crc4;
328777
329
- if (hdr) {
330
- u8 hdrlen;
331
- struct drm_dp_sideband_msg_hdr recv_hdr;
332
- ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
333
- if (ret == false) {
334
- print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
335
- return false;
336
- }
337
-
338
- /*
339
- * ignore out-of-order messages or messages that are part of a
340
- * failed transaction
341
- */
342
- if (!recv_hdr.somt && !msg->have_somt)
343
- return false;
344
-
345
- /* get length contained in this portion */
346
- msg->curchunk_len = recv_hdr.msg_len;
347
- msg->curchunk_hdrlen = hdrlen;
348
-
349
- /* we have already gotten an somt - don't bother parsing */
350
- if (recv_hdr.somt && msg->have_somt)
351
- return false;
352
-
353
- if (recv_hdr.somt) {
354
- memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
355
- msg->have_somt = true;
356
- }
357
- if (recv_hdr.eomt)
358
- msg->have_eomt = true;
359
-
360
- /* copy the bytes for the remainder of this header chunk */
361
- msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
362
- memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
363
- } else {
364
- memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
365
- msg->curchunk_idx += replybuflen;
366
- }
778
+ memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
779
+ msg->curchunk_idx += replybuflen;
367780
368781 if (msg->curchunk_idx >= msg->curchunk_len) {
369782 /* do CRC */
370783 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
784
+ if (crc4 != msg->chunk[msg->curchunk_len - 1])
785
+ print_hex_dump(KERN_DEBUG, "wrong crc",
786
+ DUMP_PREFIX_NONE, 16, 1,
787
+ msg->chunk, msg->curchunk_len, false);
371788 /* copy chunk into bigger msg */
372789 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
373790 msg->curlen += msg->curchunk_len - 1;
....@@ -380,6 +797,7 @@
380797 {
381798 int idx = 1;
382799 int i;
800
+
383801 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
384802 idx += 16;
385803 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
....@@ -431,6 +849,7 @@
431849 struct drm_dp_sideband_msg_reply_body *repmsg)
432850 {
433851 int idx = 1;
852
+
434853 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
435854 idx++;
436855 if (idx > raw->curlen)
....@@ -451,6 +870,7 @@
451870 struct drm_dp_sideband_msg_reply_body *repmsg)
452871 {
453872 int idx = 1;
873
+
454874 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
455875 idx++;
456876 if (idx > raw->curlen)
....@@ -484,6 +904,7 @@
484904 struct drm_dp_sideband_msg_reply_body *repmsg)
485905 {
486906 int idx = 1;
907
+
487908 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
488909 repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
489910 idx++;
....@@ -507,6 +928,7 @@
507928 struct drm_dp_sideband_msg_reply_body *repmsg)
508929 {
509930 int idx = 1;
931
+
510932 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
511933 idx++;
512934 if (idx > raw->curlen)
....@@ -529,6 +951,7 @@
529951 struct drm_dp_sideband_msg_reply_body *repmsg)
530952 {
531953 int idx = 1;
954
+
532955 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
533956 idx++;
534957 if (idx > raw->curlen)
....@@ -558,6 +981,42 @@
558981 return true;
559982 }
560983
984
+static bool
985
+drm_dp_sideband_parse_query_stream_enc_status(
986
+ struct drm_dp_sideband_msg_rx *raw,
987
+ struct drm_dp_sideband_msg_reply_body *repmsg)
988
+{
989
+ struct drm_dp_query_stream_enc_status_ack_reply *reply;
990
+
991
+ reply = &repmsg->u.enc_status;
992
+
993
+ reply->stream_id = raw->msg[3];
994
+
995
+ reply->reply_signed = raw->msg[2] & BIT(0);
996
+
997
+ /*
998
+ * NOTE: It's my impression from reading the spec that the below parsing
999
+ * is correct. However I noticed while testing with an HDCP 1.4 display
1000
+ * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
1001
+ * would expect both bits to be set. So keep the parsing following the
1002
+ * spec, but beware reality might not match the spec (at least for some
1003
+ * configurations).
1004
+ */
1005
+ reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
1006
+ reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
1007
+
1008
+ reply->query_capable_device_present = raw->msg[2] & BIT(5);
1009
+ reply->legacy_device_present = raw->msg[2] & BIT(6);
1010
+ reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
1011
+
1012
+ reply->auth_completed = !!(raw->msg[1] & BIT(3));
1013
+ reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
1014
+ reply->repeater_present = !!(raw->msg[1] & BIT(5));
1015
+ reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
1016
+
1017
+ return true;
1018
+}
1019
+
5611020 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
5621021 struct drm_dp_sideband_msg_reply_body *msg)
5631022 {
....@@ -565,7 +1024,7 @@
5651024 msg->reply_type = (raw->msg[0] & 0x80) >> 7;
5661025 msg->req_type = (raw->msg[0] & 0x7f);
5671026
568
- if (msg->reply_type) {
1027
+ if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
5691028 memcpy(msg->u.nak.guid, &raw->msg[1], 16);
5701029 msg->u.nak.reason = raw->msg[17];
5711030 msg->u.nak.nak_data = raw->msg[18];
....@@ -583,6 +1042,8 @@
5831042 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
5841043 case DP_REMOTE_I2C_READ:
5851044 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
1045
+ case DP_REMOTE_I2C_WRITE:
1046
+ return true; /* since there's nothing to parse */
5861047 case DP_ENUM_PATH_RESOURCES:
5871048 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
5881049 case DP_ALLOCATE_PAYLOAD:
....@@ -590,8 +1051,13 @@
5901051 case DP_POWER_DOWN_PHY:
5911052 case DP_POWER_UP_PHY:
5921053 return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
1054
+ case DP_CLEAR_PAYLOAD_ID_TABLE:
1055
+ return true; /* since there's nothing to parse */
1056
+ case DP_QUERY_STREAM_ENC_STATUS:
1057
+ return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
5931058 default:
594
- DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
1059
+ DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
1060
+ drm_dp_mst_req_type_str(msg->req_type));
5951061 return false;
5961062 }
5971063 }
....@@ -658,12 +1124,14 @@
6581124 case DP_RESOURCE_STATUS_NOTIFY:
6591125 return drm_dp_sideband_parse_resource_status_notify(raw, msg);
6601126 default:
661
- DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
1127
+ DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
1128
+ drm_dp_mst_req_type_str(msg->req_type));
6621129 return false;
6631130 }
6641131 }
6651132
666
-static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
1133
+static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
1134
+ u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
6671135 {
6681136 struct drm_dp_sideband_msg_req_body req;
6691137
....@@ -673,20 +1141,27 @@
6731141 req.u.dpcd_write.num_bytes = num_bytes;
6741142 req.u.dpcd_write.bytes = bytes;
6751143 drm_dp_encode_sideband_req(&req, msg);
676
-
677
- return 0;
6781144 }
6791145
680
-static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
1146
+static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
6811147 {
6821148 struct drm_dp_sideband_msg_req_body req;
6831149
6841150 req.req_type = DP_LINK_ADDRESS;
6851151 drm_dp_encode_sideband_req(&req, msg);
686
- return 0;
6871152 }
6881153
689
-static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
1154
+static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
1155
+{
1156
+ struct drm_dp_sideband_msg_req_body req;
1157
+
1158
+ req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
1159
+ drm_dp_encode_sideband_req(&req, msg);
1160
+ msg->path_msg = true;
1161
+}
1162
+
1163
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
1164
+ int port_num)
6901165 {
6911166 struct drm_dp_sideband_msg_req_body req;
6921167
....@@ -697,12 +1172,14 @@
6971172 return 0;
6981173 }
6991174
700
-static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
701
- u8 vcpi, uint16_t pbn,
702
- u8 number_sdp_streams,
703
- u8 *sdp_stream_sink)
1175
+static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
1176
+ int port_num,
1177
+ u8 vcpi, uint16_t pbn,
1178
+ u8 number_sdp_streams,
1179
+ u8 *sdp_stream_sink)
7041180 {
7051181 struct drm_dp_sideband_msg_req_body req;
1182
+
7061183 memset(&req, 0, sizeof(req));
7071184 req.req_type = DP_ALLOCATE_PAYLOAD;
7081185 req.u.allocate_payload.port_number = port_num;
....@@ -713,11 +1190,10 @@
7131190 number_sdp_streams);
7141191 drm_dp_encode_sideband_req(&req, msg);
7151192 msg->path_msg = true;
716
- return 0;
7171193 }
7181194
719
-static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
720
- int port_num, bool power_up)
1195
+static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
1196
+ int port_num, bool power_up)
7211197 {
7221198 struct drm_dp_sideband_msg_req_body req;
7231199
....@@ -729,6 +1205,24 @@
7291205 req.u.port_num.port_number = port_num;
7301206 drm_dp_encode_sideband_req(&req, msg);
7311207 msg->path_msg = true;
1208
+}
1209
+
1210
+static int
1211
+build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
1212
+ u8 *q_id)
1213
+{
1214
+ struct drm_dp_sideband_msg_req_body req;
1215
+
1216
+ req.req_type = DP_QUERY_STREAM_ENC_STATUS;
1217
+ req.u.enc_status.stream_id = stream_id;
1218
+ memcpy(req.u.enc_status.client_id, q_id,
1219
+ sizeof(req.u.enc_status.client_id));
1220
+ req.u.enc_status.stream_event = 0;
1221
+ req.u.enc_status.valid_stream_event = false;
1222
+ req.u.enc_status.stream_behavior = 0;
1223
+ req.u.enc_status.valid_stream_behavior = false;
1224
+
1225
+ drm_dp_encode_sideband_req(&req, msg);
7321226 return 0;
7331227 }
7341228
....@@ -765,6 +1259,7 @@
7651259 int vcpi)
7661260 {
7671261 int i;
1262
+
7681263 if (vcpi == 0)
7691264 return;
7701265
....@@ -773,11 +1268,11 @@
7731268 clear_bit(vcpi - 1, &mgr->vcpi_mask);
7741269
7751270 for (i = 0; i < mgr->max_payloads; i++) {
776
- if (mgr->proposed_vcpis[i])
777
- if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
778
- mgr->proposed_vcpis[i] = NULL;
779
- clear_bit(i + 1, &mgr->payload_mask);
780
- }
1271
+ if (mgr->proposed_vcpis[i] &&
1272
+ mgr->proposed_vcpis[i]->vcpi == vcpi) {
1273
+ mgr->proposed_vcpis[i] = NULL;
1274
+ clear_bit(i + 1, &mgr->payload_mask);
1275
+ }
7811276 }
7821277 mutex_unlock(&mgr->payload_lock);
7831278 }
....@@ -801,12 +1296,38 @@
8011296 struct drm_dp_sideband_msg_tx *txmsg)
8021297 {
8031298 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1299
+ unsigned long wait_timeout = msecs_to_jiffies(4000);
1300
+ unsigned long wait_expires = jiffies + wait_timeout;
8041301 int ret;
8051302
806
- ret = wait_event_timeout(mgr->tx_waitq,
807
- check_txmsg_state(mgr, txmsg),
808
- (4 * HZ));
809
- mutex_lock(&mstb->mgr->qlock);
1303
+ for (;;) {
1304
+ /*
1305
+ * If the driver provides a way for this, change to
1306
+ * poll-waiting for the MST reply interrupt if we didn't receive
1307
+ * it for 50 msec. This would cater for cases where the HPD
1308
+ * pulse signal got lost somewhere, even though the sink raised
1309
+ * the corresponding MST interrupt correctly. One example is the
1310
+ * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
1311
+ * filters out short pulses with a duration less than ~540 usec.
1312
+ *
1313
+ * The poll period is 50 msec to avoid missing an interrupt
1314
+ * after the sink has cleared it (after a 110msec timeout
1315
+ * since it raised the interrupt).
1316
+ */
1317
+ ret = wait_event_timeout(mgr->tx_waitq,
1318
+ check_txmsg_state(mgr, txmsg),
1319
+ mgr->cbs->poll_hpd_irq ?
1320
+ msecs_to_jiffies(50) :
1321
+ wait_timeout);
1322
+
1323
+ if (ret || !mgr->cbs->poll_hpd_irq ||
1324
+ time_after(jiffies, wait_expires))
1325
+ break;
1326
+
1327
+ mgr->cbs->poll_hpd_irq(mgr);
1328
+ }
1329
+
1330
+ mutex_lock(&mgr->qlock);
8101331 if (ret > 0) {
8111332 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
8121333 ret = -EIO;
....@@ -820,18 +1341,19 @@
8201341
8211342 /* remove from q */
8221343 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
823
- txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
1344
+ txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
1345
+ txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
8241346 list_del(&txmsg->next);
825
- }
826
-
827
- if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
828
- txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
829
- mstb->tx_slots[txmsg->seqno] = NULL;
830
- }
8311347 }
8321348 out:
1349
+ if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
1350
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1351
+
1352
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
1353
+ }
8331354 mutex_unlock(&mgr->qlock);
8341355
1356
+ drm_dp_mst_kick_tx(mgr);
8351357 return ret;
8361358 }
8371359
....@@ -847,142 +1369,606 @@
8471369 if (lct > 1)
8481370 memcpy(mstb->rad, rad, lct / 2);
8491371 INIT_LIST_HEAD(&mstb->ports);
850
- kref_init(&mstb->kref);
1372
+ kref_init(&mstb->topology_kref);
1373
+ kref_init(&mstb->malloc_kref);
8511374 return mstb;
8521375 }
8531376
854
-static void drm_dp_free_mst_port(struct kref *kref);
855
-
8561377 static void drm_dp_free_mst_branch_device(struct kref *kref)
8571378 {
858
- struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
859
- if (mstb->port_parent) {
860
- if (list_empty(&mstb->port_parent->next))
861
- kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
862
- }
1379
+ struct drm_dp_mst_branch *mstb =
1380
+ container_of(kref, struct drm_dp_mst_branch, malloc_kref);
1381
+
1382
+ if (mstb->port_parent)
1383
+ drm_dp_mst_put_port_malloc(mstb->port_parent);
1384
+
8631385 kfree(mstb);
8641386 }
8651387
1388
+/**
1389
+ * DOC: Branch device and port refcounting
1390
+ *
1391
+ * Topology refcount overview
1392
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~
1393
+ *
1394
+ * The refcounting schemes for &struct drm_dp_mst_branch and &struct
1395
+ * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
1396
+ * two different kinds of refcounts: topology refcounts, and malloc refcounts.
1397
+ *
1398
+ * Topology refcounts are not exposed to drivers, and are handled internally
1399
+ * by the DP MST helpers. The helpers use them in order to prevent the
1400
+ * in-memory topology state from being changed in the middle of critical
1401
+ * operations like changing the internal state of payload allocations. This
1402
+ * means each branch and port will be considered to be connected to the rest
1403
+ * of the topology until its topology refcount reaches zero. Additionally,
1404
+ * for ports this means that their associated &struct drm_connector will stay
1405
+ * registered with userspace until the port's refcount reaches 0.
1406
+ *
1407
+ * Malloc refcount overview
1408
+ * ~~~~~~~~~~~~~~~~~~~~~~~~
1409
+ *
1410
+ * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
1411
+ * drm_dp_mst_branch allocated even after all of its topology references have
1412
+ * been dropped, so that the driver or MST helpers can safely access each
1413
+ * branch's last known state before it was disconnected from the topology.
1414
+ * When the malloc refcount of a port or branch reaches 0, the memory
1415
+ * allocation containing the &struct drm_dp_mst_branch or &struct
1416
+ * drm_dp_mst_port respectively will be freed.
1417
+ *
1418
+ * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
1419
+ * to drivers. As of writing this documentation, there are no drivers that
1420
+ * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
1421
+ * helpers. Exposing this API to drivers in a race-free manner would take more
1422
+ * tweaking of the refcounting scheme, however patches are welcome provided
1423
+ * there is a legitimate driver usecase for this.
1424
+ *
1425
+ * Refcount relationships in a topology
1426
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1427
+ *
1428
+ * Let's take a look at why the relationship between topology and malloc
1429
+ * refcounts is designed the way it is.
1430
+ *
1431
+ * .. kernel-figure:: dp-mst/topology-figure-1.dot
1432
+ *
1433
+ * An example of topology and malloc refs in a DP MST topology with two
1434
+ * active payloads. Topology refcount increments are indicated by solid
1435
+ * lines, and malloc refcount increments are indicated by dashed lines.
1436
+ * Each starts from the branch which incremented the refcount, and ends at
1437
+ * the branch to which the refcount belongs to, i.e. the arrow points the
1438
+ * same way as the C pointers used to reference a structure.
1439
+ *
1440
+ * As you can see in the above figure, every branch increments the topology
1441
+ * refcount of its children, and increments the malloc refcount of its
1442
+ * parent. Additionally, every payload increments the malloc refcount of its
1443
+ * assigned port by 1.
1444
+ *
1445
+ * So, what would happen if MSTB #3 from the above figure was unplugged from
1446
+ * the system, but the driver hadn't yet removed payload #2 from port #3? The
1447
+ * topology would start to look like the figure below.
1448
+ *
1449
+ * .. kernel-figure:: dp-mst/topology-figure-2.dot
1450
+ *
1451
+ * Ports and branch devices which have been released from memory are
1452
+ * colored grey, and references which have been removed are colored red.
1453
+ *
1454
+ * Whenever a port or branch device's topology refcount reaches zero, it will
1455
+ * decrement the topology refcounts of all its children, the malloc refcount
1456
+ * of its parent, and finally its own malloc refcount. For MSTB #4 and port
1457
+ * #4, this means they both have been disconnected from the topology and freed
1458
+ * from memory. But, because payload #2 is still holding a reference to port
1459
+ * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
1460
+ * is still accessible from memory. This also means port #3 has not yet
1461
+ * decremented the malloc refcount of MSTB #3, so its &struct
1462
+ * drm_dp_mst_branch will also stay allocated in memory until port #3's
1463
+ * malloc refcount reaches 0.
1464
+ *
1465
+ * This relationship is necessary because in order to release payload #2, we
1466
+ * need to be able to figure out the last relative of port #3 that's still
1467
+ * connected to the topology. In this case, we would travel up the topology as
1468
+ * shown below.
1469
+ *
1470
+ * .. kernel-figure:: dp-mst/topology-figure-3.dot
1471
+ *
1472
+ * And finally, remove payload #2 by communicating with port #2 through
1473
+ * sideband transactions.
1474
+ */
1475
+
1476
+/**
1477
+ * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
1478
+ * device
1479
+ * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
1480
+ *
1481
+ * Increments &drm_dp_mst_branch.malloc_kref. When
1482
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1483
+ * will be released and @mstb may no longer be used.
1484
+ *
1485
+ * See also: drm_dp_mst_put_mstb_malloc()
1486
+ */
1487
+static void
1488
+drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
1489
+{
1490
+ kref_get(&mstb->malloc_kref);
1491
+ DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
1492
+}
1493
+
1494
+/**
1495
+ * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
1496
+ * device
1497
+ * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
1498
+ *
1499
+ * Decrements &drm_dp_mst_branch.malloc_kref. When
1500
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1501
+ * will be released and @mstb may no longer be used.
1502
+ *
1503
+ * See also: drm_dp_mst_get_mstb_malloc()
1504
+ */
1505
+static void
1506
+drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
1507
+{
1508
+ DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
1509
+ kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
1510
+}
1511
+
1512
+static void drm_dp_free_mst_port(struct kref *kref)
1513
+{
1514
+ struct drm_dp_mst_port *port =
1515
+ container_of(kref, struct drm_dp_mst_port, malloc_kref);
1516
+
1517
+ drm_dp_mst_put_mstb_malloc(port->parent);
1518
+ kfree(port);
1519
+}
1520
+
1521
+/**
1522
+ * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1523
+ * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
1524
+ *
1525
+ * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1526
+ * reaches 0, the memory allocation for @port will be released and @port may
1527
+ * no longer be used.
1528
+ *
1529
+ * Because @port could potentially be freed at any time by the DP MST helpers
1530
+ * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1531
+ * function, drivers that which to make use of &struct drm_dp_mst_port should
1532
+ * ensure that they grab at least one main malloc reference to their MST ports
1533
+ * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1534
+ * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1535
+ *
1536
+ * See also: drm_dp_mst_put_port_malloc()
1537
+ */
1538
+void
1539
+drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1540
+{
1541
+ kref_get(&port->malloc_kref);
1542
+ DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
1543
+}
1544
+EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
1545
+
1546
+/**
1547
+ * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1548
+ * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1549
+ *
1550
+ * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1551
+ * reaches 0, the memory allocation for @port will be released and @port may
1552
+ * no longer be used.
1553
+ *
1554
+ * See also: drm_dp_mst_get_port_malloc()
1555
+ */
1556
+void
1557
+drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1558
+{
1559
+ DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1560
+ kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1561
+}
1562
+EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1563
+
1564
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
1565
+
1566
+#define STACK_DEPTH 8
1567
+
1568
+static noinline void
1569
+__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1570
+ struct drm_dp_mst_topology_ref_history *history,
1571
+ enum drm_dp_mst_topology_ref_type type)
1572
+{
1573
+ struct drm_dp_mst_topology_ref_entry *entry = NULL;
1574
+ depot_stack_handle_t backtrace;
1575
+ ulong stack_entries[STACK_DEPTH];
1576
+ uint n;
1577
+ int i;
1578
+
1579
+ n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
1580
+ backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
1581
+ if (!backtrace)
1582
+ return;
1583
+
1584
+ /* Try to find an existing entry for this backtrace */
1585
+ for (i = 0; i < history->len; i++) {
1586
+ if (history->entries[i].backtrace == backtrace) {
1587
+ entry = &history->entries[i];
1588
+ break;
1589
+ }
1590
+ }
1591
+
1592
+ /* Otherwise add one */
1593
+ if (!entry) {
1594
+ struct drm_dp_mst_topology_ref_entry *new;
1595
+ int new_len = history->len + 1;
1596
+
1597
+ new = krealloc(history->entries, sizeof(*new) * new_len,
1598
+ GFP_KERNEL);
1599
+ if (!new)
1600
+ return;
1601
+
1602
+ entry = &new[history->len];
1603
+ history->len = new_len;
1604
+ history->entries = new;
1605
+
1606
+ entry->backtrace = backtrace;
1607
+ entry->type = type;
1608
+ entry->count = 0;
1609
+ }
1610
+ entry->count++;
1611
+ entry->ts_nsec = ktime_get_ns();
1612
+}
1613
+
1614
+static int
1615
+topology_ref_history_cmp(const void *a, const void *b)
1616
+{
1617
+ const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
1618
+
1619
+ if (entry_a->ts_nsec > entry_b->ts_nsec)
1620
+ return 1;
1621
+ else if (entry_a->ts_nsec < entry_b->ts_nsec)
1622
+ return -1;
1623
+ else
1624
+ return 0;
1625
+}
1626
+
1627
+static inline const char *
1628
+topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
1629
+{
1630
+ if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
1631
+ return "get";
1632
+ else
1633
+ return "put";
1634
+}
1635
+
1636
+static void
1637
+__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
1638
+ void *ptr, const char *type_str)
1639
+{
1640
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1641
+ char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1642
+ int i;
1643
+
1644
+ if (!buf)
1645
+ return;
1646
+
1647
+ if (!history->len)
1648
+ goto out;
1649
+
1650
+ /* First, sort the list so that it goes from oldest to newest
1651
+ * reference entry
1652
+ */
1653
+ sort(history->entries, history->len, sizeof(*history->entries),
1654
+ topology_ref_history_cmp, NULL);
1655
+
1656
+ drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
1657
+ type_str, ptr);
1658
+
1659
+ for (i = 0; i < history->len; i++) {
1660
+ const struct drm_dp_mst_topology_ref_entry *entry =
1661
+ &history->entries[i];
1662
+ ulong *entries;
1663
+ uint nr_entries;
1664
+ u64 ts_nsec = entry->ts_nsec;
1665
+ u32 rem_nsec = do_div(ts_nsec, 1000000000);
1666
+
1667
+ nr_entries = stack_depot_fetch(entry->backtrace, &entries);
1668
+ stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
1669
+
1670
+ drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s",
1671
+ entry->count,
1672
+ topology_ref_type_to_str(entry->type),
1673
+ ts_nsec, rem_nsec / 1000, buf);
1674
+ }
1675
+
1676
+ /* Now free the history, since this is the only time we expose it */
1677
+ kfree(history->entries);
1678
+out:
1679
+ kfree(buf);
1680
+}
1681
+
1682
+static __always_inline void
1683
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
1684
+{
1685
+ __dump_topology_ref_history(&mstb->topology_ref_history, mstb,
1686
+ "MSTB");
1687
+}
1688
+
1689
+static __always_inline void
1690
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
1691
+{
1692
+ __dump_topology_ref_history(&port->topology_ref_history, port,
1693
+ "Port");
1694
+}
1695
+
1696
+static __always_inline void
1697
+save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
1698
+ enum drm_dp_mst_topology_ref_type type)
1699
+{
1700
+ __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1701
+}
1702
+
1703
+static __always_inline void
1704
+save_port_topology_ref(struct drm_dp_mst_port *port,
1705
+ enum drm_dp_mst_topology_ref_type type)
1706
+{
1707
+ __topology_ref_save(port->mgr, &port->topology_ref_history, type);
1708
+}
1709
+
1710
+static inline void
1711
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1712
+{
1713
+ mutex_lock(&mgr->topology_ref_history_lock);
1714
+}
1715
+
1716
+static inline void
1717
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1718
+{
1719
+ mutex_unlock(&mgr->topology_ref_history_lock);
1720
+}
1721
+#else
1722
+static inline void
1723
+topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1724
+static inline void
1725
+topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1726
+static inline void
1727
+drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
1728
+static inline void
1729
+drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
1730
+#define save_mstb_topology_ref(mstb, type)
1731
+#define save_port_topology_ref(port, type)
1732
+#endif
1733
+
8661734 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
8671735 {
868
- struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
869
- struct drm_dp_mst_port *port, *tmp;
870
- bool wake_tx = false;
1736
+ struct drm_dp_mst_branch *mstb =
1737
+ container_of(kref, struct drm_dp_mst_branch, topology_kref);
1738
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1739
+
1740
+ drm_dp_mst_dump_mstb_topology_history(mstb);
1741
+
1742
+ INIT_LIST_HEAD(&mstb->destroy_next);
8711743
8721744 /*
873
- * init kref again to be used by ports to remove mst branch when it is
874
- * not needed anymore
1745
+ * This can get called under mgr->mutex, so we need to perform the
1746
+ * actual destruction of the mstb in another worker
8751747 */
876
- kref_init(kref);
877
-
878
- if (mstb->port_parent && list_empty(&mstb->port_parent->next))
879
- kref_get(&mstb->port_parent->kref);
880
-
881
- /*
882
- * destroy all ports - don't need lock
883
- * as there are no more references to the mst branch
884
- * device at this point.
885
- */
886
- list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
887
- list_del(&port->next);
888
- drm_dp_put_port(port);
889
- }
890
-
891
- /* drop any tx slots msg */
892
- mutex_lock(&mstb->mgr->qlock);
893
- if (mstb->tx_slots[0]) {
894
- mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
895
- mstb->tx_slots[0] = NULL;
896
- wake_tx = true;
897
- }
898
- if (mstb->tx_slots[1]) {
899
- mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
900
- mstb->tx_slots[1] = NULL;
901
- wake_tx = true;
902
- }
903
- mutex_unlock(&mstb->mgr->qlock);
904
-
905
- if (wake_tx)
906
- wake_up_all(&mstb->mgr->tx_waitq);
907
-
908
- kref_put(kref, drm_dp_free_mst_branch_device);
1748
+ mutex_lock(&mgr->delayed_destroy_lock);
1749
+ list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1750
+ mutex_unlock(&mgr->delayed_destroy_lock);
1751
+ queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
9091752 }
9101753
911
-static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
1754
+/**
1755
+ * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1756
+ * branch device unless it's zero
1757
+ * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1758
+ *
1759
+ * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1760
+ * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
1761
+ * reached 0). Holding a topology reference implies that a malloc reference
1762
+ * will be held to @mstb as long as the user holds the topology reference.
1763
+ *
1764
+ * Care should be taken to ensure that the user has at least one malloc
1765
+ * reference to @mstb. If you already have a topology reference to @mstb, you
1766
+ * should use drm_dp_mst_topology_get_mstb() instead.
1767
+ *
1768
+ * See also:
1769
+ * drm_dp_mst_topology_get_mstb()
1770
+ * drm_dp_mst_topology_put_mstb()
1771
+ *
1772
+ * Returns:
1773
+ * * 1: A topology reference was grabbed successfully
1774
+ * * 0: @port is no longer in the topology, no reference was grabbed
1775
+ */
1776
+static int __must_check
1777
+drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
9121778 {
913
- kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
1779
+ int ret;
1780
+
1781
+ topology_ref_history_lock(mstb->mgr);
1782
+ ret = kref_get_unless_zero(&mstb->topology_kref);
1783
+ if (ret) {
1784
+ DRM_DEBUG("mstb %p (%d)\n",
1785
+ mstb, kref_read(&mstb->topology_kref));
1786
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1787
+ }
1788
+
1789
+ topology_ref_history_unlock(mstb->mgr);
1790
+
1791
+ return ret;
9141792 }
9151793
916
-
917
-static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
1794
+/**
1795
+ * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1796
+ * branch device
1797
+ * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1798
+ *
1799
+ * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
1800
+ * not it's already reached 0. This is only valid to use in scenarios where
1801
+ * you are already guaranteed to have at least one active topology reference
1802
+ * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1803
+ *
1804
+ * See also:
1805
+ * drm_dp_mst_topology_try_get_mstb()
1806
+ * drm_dp_mst_topology_put_mstb()
1807
+ */
1808
+static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
9181809 {
919
- struct drm_dp_mst_branch *mstb;
1810
+ topology_ref_history_lock(mstb->mgr);
9201811
921
- switch (old_pdt) {
922
- case DP_PEER_DEVICE_DP_LEGACY_CONV:
923
- case DP_PEER_DEVICE_SST_SINK:
924
- /* remove i2c over sideband */
925
- drm_dp_mst_unregister_i2c_bus(&port->aux);
926
- break;
927
- case DP_PEER_DEVICE_MST_BRANCHING:
928
- mstb = port->mstb;
929
- port->mstb = NULL;
930
- drm_dp_put_mst_branch_device(mstb);
931
- break;
932
- }
1812
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1813
+ WARN_ON(kref_read(&mstb->topology_kref) == 0);
1814
+ kref_get(&mstb->topology_kref);
1815
+ DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1816
+
1817
+ topology_ref_history_unlock(mstb->mgr);
1818
+}
1819
+
1820
+/**
1821
+ * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1822
+ * device
1823
+ * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1824
+ *
1825
+ * Releases a topology reference from @mstb by decrementing
1826
+ * &drm_dp_mst_branch.topology_kref.
1827
+ *
1828
+ * See also:
1829
+ * drm_dp_mst_topology_try_get_mstb()
1830
+ * drm_dp_mst_topology_get_mstb()
1831
+ */
1832
+static void
1833
+drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1834
+{
1835
+ topology_ref_history_lock(mstb->mgr);
1836
+
1837
+ DRM_DEBUG("mstb %p (%d)\n",
1838
+ mstb, kref_read(&mstb->topology_kref) - 1);
1839
+ save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1840
+
1841
+ topology_ref_history_unlock(mstb->mgr);
1842
+ kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
9331843 }
9341844
9351845 static void drm_dp_destroy_port(struct kref *kref)
9361846 {
937
- struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
1847
+ struct drm_dp_mst_port *port =
1848
+ container_of(kref, struct drm_dp_mst_port, topology_kref);
9381849 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
9391850
940
- if (!port->input) {
941
- port->vcpi.num_slots = 0;
1851
+ drm_dp_mst_dump_port_topology_history(port);
9421852
943
- kfree(port->cached_edid);
944
-
945
- /*
946
- * The only time we don't have a connector
947
- * on an output port is if the connector init
948
- * fails.
949
- */
950
- if (port->connector) {
951
- /* we can't destroy the connector here, as
952
- * we might be holding the mode_config.mutex
953
- * from an EDID retrieval */
954
-
955
- mutex_lock(&mgr->destroy_connector_lock);
956
- kref_get(&port->parent->kref);
957
- list_add(&port->next, &mgr->destroy_connector_list);
958
- mutex_unlock(&mgr->destroy_connector_lock);
959
- schedule_work(&mgr->destroy_connector_work);
960
- return;
961
- }
962
- /* no need to clean up vcpi
963
- * as if we have no connector we never setup a vcpi */
964
- drm_dp_port_teardown_pdt(port, port->pdt);
965
- port->pdt = DP_PEER_DEVICE_NONE;
1853
+ /* There's nothing that needs locking to destroy an input port yet */
1854
+ if (port->input) {
1855
+ drm_dp_mst_put_port_malloc(port);
1856
+ return;
9661857 }
967
- kfree(port);
1858
+
1859
+ kfree(port->cached_edid);
1860
+
1861
+ /*
1862
+ * we can't destroy the connector here, as we might be holding the
1863
+ * mode_config.mutex from an EDID retrieval
1864
+ */
1865
+ mutex_lock(&mgr->delayed_destroy_lock);
1866
+ list_add(&port->next, &mgr->destroy_port_list);
1867
+ mutex_unlock(&mgr->delayed_destroy_lock);
1868
+ queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
9681869 }
9691870
970
-static void drm_dp_put_port(struct drm_dp_mst_port *port)
1871
+/**
1872
+ * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1873
+ * port unless it's zero
1874
+ * @port: &struct drm_dp_mst_port to increment the topology refcount of
1875
+ *
1876
+ * Attempts to grab a topology reference to @port, if it hasn't yet been
1877
+ * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
1878
+ * 0). Holding a topology reference implies that a malloc reference will be
1879
+ * held to @port as long as the user holds the topology reference.
1880
+ *
1881
+ * Care should be taken to ensure that the user has at least one malloc
1882
+ * reference to @port. If you already have a topology reference to @port, you
1883
+ * should use drm_dp_mst_topology_get_port() instead.
1884
+ *
1885
+ * See also:
1886
+ * drm_dp_mst_topology_get_port()
1887
+ * drm_dp_mst_topology_put_port()
1888
+ *
1889
+ * Returns:
1890
+ * * 1: A topology reference was grabbed successfully
1891
+ * * 0: @port is no longer in the topology, no reference was grabbed
1892
+ */
1893
+static int __must_check
1894
+drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
9711895 {
972
- kref_put(&port->kref, drm_dp_destroy_port);
1896
+ int ret;
1897
+
1898
+ topology_ref_history_lock(port->mgr);
1899
+ ret = kref_get_unless_zero(&port->topology_kref);
1900
+ if (ret) {
1901
+ DRM_DEBUG("port %p (%d)\n",
1902
+ port, kref_read(&port->topology_kref));
1903
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1904
+ }
1905
+
1906
+ topology_ref_history_unlock(port->mgr);
1907
+ return ret;
9731908 }
9741909
975
-static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
1910
+/**
1911
+ * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1912
+ * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1913
+ *
1914
+ * Increments &drm_dp_mst_port.topology_refcount without checking whether or
1915
+ * not it's already reached 0. This is only valid to use in scenarios where
1916
+ * you are already guaranteed to have at least one active topology reference
1917
+ * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1918
+ *
1919
+ * See also:
1920
+ * drm_dp_mst_topology_try_get_port()
1921
+ * drm_dp_mst_topology_put_port()
1922
+ */
1923
+static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1924
+{
1925
+ topology_ref_history_lock(port->mgr);
1926
+
1927
+ WARN_ON(kref_read(&port->topology_kref) == 0);
1928
+ kref_get(&port->topology_kref);
1929
+ DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1930
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1931
+
1932
+ topology_ref_history_unlock(port->mgr);
1933
+}
1934
+
1935
+/**
1936
+ * drm_dp_mst_topology_put_port() - release a topology reference to a port
1937
+ * @port: The &struct drm_dp_mst_port to release the topology reference from
1938
+ *
1939
+ * Releases a topology reference from @port by decrementing
1940
+ * &drm_dp_mst_port.topology_kref.
1941
+ *
1942
+ * See also:
1943
+ * drm_dp_mst_topology_try_get_port()
1944
+ * drm_dp_mst_topology_get_port()
1945
+ */
1946
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1947
+{
1948
+ topology_ref_history_lock(port->mgr);
1949
+
1950
+ DRM_DEBUG("port %p (%d)\n",
1951
+ port, kref_read(&port->topology_kref) - 1);
1952
+ save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1953
+
1954
+ topology_ref_history_unlock(port->mgr);
1955
+ kref_put(&port->topology_kref, drm_dp_destroy_port);
1956
+}
1957
+
1958
+static struct drm_dp_mst_branch *
1959
+drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1960
+ struct drm_dp_mst_branch *to_find)
9761961 {
9771962 struct drm_dp_mst_port *port;
9781963 struct drm_dp_mst_branch *rmstb;
979
- if (to_find == mstb) {
980
- kref_get(&mstb->kref);
1964
+
1965
+ if (to_find == mstb)
9811966 return mstb;
982
- }
1967
+
9831968 list_for_each_entry(port, &mstb->ports, next) {
9841969 if (port->mstb) {
985
- rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
1970
+ rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1971
+ port->mstb, to_find);
9861972 if (rmstb)
9871973 return rmstb;
9881974 }
....@@ -990,27 +1976,37 @@
9901976 return NULL;
9911977 }
9921978
993
-static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
1979
+static struct drm_dp_mst_branch *
1980
+drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1981
+ struct drm_dp_mst_branch *mstb)
9941982 {
9951983 struct drm_dp_mst_branch *rmstb = NULL;
1984
+
9961985 mutex_lock(&mgr->lock);
997
- if (mgr->mst_primary)
998
- rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
1986
+ if (mgr->mst_primary) {
1987
+ rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1988
+ mgr->mst_primary, mstb);
1989
+
1990
+ if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1991
+ rmstb = NULL;
1992
+ }
9991993 mutex_unlock(&mgr->lock);
10001994 return rmstb;
10011995 }
10021996
1003
-static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
1997
+static struct drm_dp_mst_port *
1998
+drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1999
+ struct drm_dp_mst_port *to_find)
10042000 {
10052001 struct drm_dp_mst_port *port, *mport;
10062002
10072003 list_for_each_entry(port, &mstb->ports, next) {
1008
- if (port == to_find) {
1009
- kref_get(&port->kref);
2004
+ if (port == to_find)
10102005 return port;
1011
- }
2006
+
10122007 if (port->mstb) {
1013
- mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
2008
+ mport = drm_dp_mst_topology_get_port_validated_locked(
2009
+ port->mstb, to_find);
10142010 if (mport)
10152011 return mport;
10162012 }
....@@ -1018,12 +2014,20 @@
10182014 return NULL;
10192015 }
10202016
1021
-static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2017
+static struct drm_dp_mst_port *
2018
+drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
2019
+ struct drm_dp_mst_port *port)
10222020 {
10232021 struct drm_dp_mst_port *rport = NULL;
2022
+
10242023 mutex_lock(&mgr->lock);
1025
- if (mgr->mst_primary)
1026
- rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
2024
+ if (mgr->mst_primary) {
2025
+ rport = drm_dp_mst_topology_get_port_validated_locked(
2026
+ mgr->mst_primary, port);
2027
+
2028
+ if (rport && !drm_dp_mst_topology_try_get_port(rport))
2029
+ rport = NULL;
2030
+ }
10272031 mutex_unlock(&mgr->lock);
10282032 return rport;
10292033 }
....@@ -1031,11 +2035,12 @@
10312035 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
10322036 {
10332037 struct drm_dp_mst_port *port;
2038
+ int ret;
10342039
10352040 list_for_each_entry(port, &mstb->ports, next) {
10362041 if (port->port_num == port_num) {
1037
- kref_get(&port->kref);
1038
- return port;
2042
+ ret = drm_dp_mst_topology_try_get_port(port);
2043
+ return ret ? port : NULL;
10392044 }
10402045 }
10412046
....@@ -1053,6 +2058,7 @@
10532058 int parent_lct = port->parent->lct;
10542059 int shift = 4;
10552060 int idx = (parent_lct - 1) / 2;
2061
+
10562062 if (parent_lct > 1) {
10572063 memcpy(rad, port->parent->rad, idx + 1);
10582064 shift = (parent_lct % 2) ? 4 : 0;
....@@ -1063,58 +2069,165 @@
10632069 return parent_lct + 1;
10642070 }
10652071
1066
-/*
1067
- * return sends link address for new mstb
1068
- */
1069
-static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
2072
+static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
10702073 {
1071
- int ret;
1072
- u8 rad[6], lct;
1073
- bool send_link = false;
1074
- switch (port->pdt) {
2074
+ switch (pdt) {
10752075 case DP_PEER_DEVICE_DP_LEGACY_CONV:
10762076 case DP_PEER_DEVICE_SST_SINK:
1077
- /* add i2c over sideband */
1078
- ret = drm_dp_mst_register_i2c_bus(&port->aux);
1079
- break;
2077
+ return true;
10802078 case DP_PEER_DEVICE_MST_BRANCHING:
1081
- lct = drm_dp_calculate_rad(port, rad);
2079
+ /* For sst branch device */
2080
+ if (!mcs)
2081
+ return true;
10822082
1083
- port->mstb = drm_dp_add_mst_branch_device(lct, rad);
1084
- if (port->mstb) {
1085
- port->mstb->mgr = port->mgr;
1086
- port->mstb->port_parent = port;
1087
-
1088
- send_link = true;
1089
- }
1090
- break;
2083
+ return false;
10912084 }
1092
- return send_link;
2085
+ return true;
10932086 }
10942087
1095
-static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2088
+static int
2089
+drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
2090
+ bool new_mcs)
10962091 {
1097
- int ret;
2092
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2093
+ struct drm_dp_mst_branch *mstb;
2094
+ u8 rad[8], lct;
2095
+ int ret = 0;
2096
+
2097
+ if (port->pdt == new_pdt && port->mcs == new_mcs)
2098
+ return 0;
2099
+
2100
+ /* Teardown the old pdt, if there is one */
2101
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
2102
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2103
+ /*
2104
+ * If the new PDT would also have an i2c bus,
2105
+ * don't bother with reregistering it
2106
+ */
2107
+ if (new_pdt != DP_PEER_DEVICE_NONE &&
2108
+ drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
2109
+ port->pdt = new_pdt;
2110
+ port->mcs = new_mcs;
2111
+ return 0;
2112
+ }
2113
+
2114
+ /* remove i2c over sideband */
2115
+ drm_dp_mst_unregister_i2c_bus(port);
2116
+ } else {
2117
+ mutex_lock(&mgr->lock);
2118
+ drm_dp_mst_topology_put_mstb(port->mstb);
2119
+ port->mstb = NULL;
2120
+ mutex_unlock(&mgr->lock);
2121
+ }
2122
+ }
2123
+
2124
+ port->pdt = new_pdt;
2125
+ port->mcs = new_mcs;
2126
+
2127
+ if (port->pdt != DP_PEER_DEVICE_NONE) {
2128
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
2129
+ /* add i2c over sideband */
2130
+ ret = drm_dp_mst_register_i2c_bus(port);
2131
+ } else {
2132
+ lct = drm_dp_calculate_rad(port, rad);
2133
+ mstb = drm_dp_add_mst_branch_device(lct, rad);
2134
+ if (!mstb) {
2135
+ ret = -ENOMEM;
2136
+ DRM_ERROR("Failed to create MSTB for port %p",
2137
+ port);
2138
+ goto out;
2139
+ }
2140
+
2141
+ mutex_lock(&mgr->lock);
2142
+ port->mstb = mstb;
2143
+ mstb->mgr = port->mgr;
2144
+ mstb->port_parent = port;
2145
+
2146
+ /*
2147
+ * Make sure this port's memory allocation stays
2148
+ * around until its child MSTB releases it
2149
+ */
2150
+ drm_dp_mst_get_port_malloc(port);
2151
+ mutex_unlock(&mgr->lock);
2152
+
2153
+ /* And make sure we send a link address for this */
2154
+ ret = 1;
2155
+ }
2156
+ }
2157
+
2158
+out:
2159
+ if (ret < 0)
2160
+ port->pdt = DP_PEER_DEVICE_NONE;
2161
+ return ret;
2162
+}
2163
+
2164
+/**
2165
+ * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2166
+ * @aux: Fake sideband AUX CH
2167
+ * @offset: address of the (first) register to read
2168
+ * @buffer: buffer to store the register values
2169
+ * @size: number of bytes in @buffer
2170
+ *
2171
+ * Performs the same functionality for remote devices via
2172
+ * sideband messaging as drm_dp_dpcd_read() does for local
2173
+ * devices via actual AUX CH.
2174
+ *
2175
+ * Return: Number of bytes read, or negative error code on failure.
2176
+ */
2177
+ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2178
+ unsigned int offset, void *buffer, size_t size)
2179
+{
2180
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2181
+ aux);
2182
+
2183
+ return drm_dp_send_dpcd_read(port->mgr, port,
2184
+ offset, size, buffer);
2185
+}
2186
+
2187
+/**
2188
+ * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2189
+ * @aux: Fake sideband AUX CH
2190
+ * @offset: address of the (first) register to write
2191
+ * @buffer: buffer containing the values to write
2192
+ * @size: number of bytes in @buffer
2193
+ *
2194
+ * Performs the same functionality for remote devices via
2195
+ * sideband messaging as drm_dp_dpcd_write() does for local
2196
+ * devices via actual AUX CH.
2197
+ *
2198
+ * Return: number of bytes written on success, negative error code on failure.
2199
+ */
2200
+ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2201
+ unsigned int offset, void *buffer, size_t size)
2202
+{
2203
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2204
+ aux);
2205
+
2206
+ return drm_dp_send_dpcd_write(port->mgr, port,
2207
+ offset, size, buffer);
2208
+}
2209
+
2210
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2211
+{
2212
+ int ret = 0;
10982213
10992214 memcpy(mstb->guid, guid, 16);
11002215
11012216 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
11022217 if (mstb->port_parent) {
1103
- ret = drm_dp_send_dpcd_write(
1104
- mstb->mgr,
1105
- mstb->port_parent,
1106
- DP_GUID,
1107
- 16,
1108
- mstb->guid);
2218
+ ret = drm_dp_send_dpcd_write(mstb->mgr,
2219
+ mstb->port_parent,
2220
+ DP_GUID, 16, mstb->guid);
11092221 } else {
1110
-
1111
- ret = drm_dp_dpcd_write(
1112
- mstb->mgr->aux,
1113
- DP_GUID,
1114
- mstb->guid,
1115
- 16);
2222
+ ret = drm_dp_dpcd_write(mstb->mgr->aux,
2223
+ DP_GUID, mstb->guid, 16);
11162224 }
11172225 }
2226
+
2227
+ if (ret < 16 && ret > 0)
2228
+ return -EPROTO;
2229
+
2230
+ return ret == 16 ? 0 : ret;
11182231 }
11192232
11202233 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
....@@ -1124,10 +2237,12 @@
11242237 {
11252238 int i;
11262239 char temp[8];
2240
+
11272241 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
11282242 for (i = 0; i < (mstb->lct - 1); i++) {
11292243 int shift = (i % 2) ? 0 : 4;
11302244 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2245
+
11312246 snprintf(temp, sizeof(temp), "-%d", port_num);
11322247 strlcat(proppath, temp, proppath_size);
11332248 }
....@@ -1135,35 +2250,177 @@
11352250 strlcat(proppath, temp, proppath_size);
11362251 }
11372252
1138
-static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1139
- struct drm_device *dev,
1140
- struct drm_dp_link_addr_reply_port *port_msg)
2253
+/**
2254
+ * drm_dp_mst_connector_late_register() - Late MST connector registration
2255
+ * @connector: The MST connector
2256
+ * @port: The MST port for this connector
2257
+ *
2258
+ * Helper to register the remote aux device for this MST port. Drivers should
2259
+ * call this from their mst connector's late_register hook to enable MST aux
2260
+ * devices.
2261
+ *
2262
+ * Return: 0 on success, negative error code on failure.
2263
+ */
2264
+int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2265
+ struct drm_dp_mst_port *port)
11412266 {
1142
- struct drm_dp_mst_port *port;
1143
- bool ret;
1144
- bool created = false;
1145
- int old_pdt = 0;
1146
- int old_ddps = 0;
1147
- port = drm_dp_get_port(mstb, port_msg->port_number);
1148
- if (!port) {
1149
- port = kzalloc(sizeof(*port), GFP_KERNEL);
1150
- if (!port)
1151
- return;
1152
- kref_init(&port->kref);
1153
- port->parent = mstb;
1154
- port->port_num = port_msg->port_number;
1155
- port->mgr = mstb->mgr;
1156
- port->aux.name = "DPMST";
1157
- port->aux.dev = dev->dev;
1158
- created = true;
1159
- } else {
1160
- old_pdt = port->pdt;
1161
- old_ddps = port->ddps;
2267
+ DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2268
+ port->aux.name, connector->kdev->kobj.name);
2269
+
2270
+ port->aux.dev = connector->kdev;
2271
+ return drm_dp_aux_register_devnode(&port->aux);
2272
+}
2273
+EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2274
+
2275
+/**
2276
+ * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2277
+ * @connector: The MST connector
2278
+ * @port: The MST port for this connector
2279
+ *
2280
+ * Helper to unregister the remote aux device for this MST port, registered by
2281
+ * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2282
+ * connector's early_unregister hook.
2283
+ */
2284
+void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2285
+ struct drm_dp_mst_port *port)
2286
+{
2287
+ DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2288
+ port->aux.name, connector->kdev->kobj.name);
2289
+ drm_dp_aux_unregister_devnode(&port->aux);
2290
+}
2291
+EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
2292
+
2293
+static void
2294
+drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2295
+ struct drm_dp_mst_port *port)
2296
+{
2297
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2298
+ char proppath[255];
2299
+ int ret;
2300
+
2301
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2302
+ port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2303
+ if (!port->connector) {
2304
+ ret = -ENOMEM;
2305
+ goto error;
11622306 }
11632307
1164
- port->pdt = port_msg->peer_device_type;
2308
+ if (port->pdt != DP_PEER_DEVICE_NONE &&
2309
+ drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
2310
+ port->port_num >= DP_MST_LOGICAL_PORT_0) {
2311
+ port->cached_edid = drm_get_edid(port->connector,
2312
+ &port->aux.ddc);
2313
+ drm_connector_set_tile_property(port->connector);
2314
+ }
2315
+
2316
+ drm_connector_register(port->connector);
2317
+ return;
2318
+
2319
+error:
2320
+ DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2321
+}
2322
+
2323
+/*
2324
+ * Drop a topology reference, and unlink the port from the in-memory topology
2325
+ * layout
2326
+ */
2327
+static void
2328
+drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2329
+ struct drm_dp_mst_port *port)
2330
+{
2331
+ mutex_lock(&mgr->lock);
2332
+ port->parent->num_ports--;
2333
+ list_del(&port->next);
2334
+ mutex_unlock(&mgr->lock);
2335
+ drm_dp_mst_topology_put_port(port);
2336
+}
2337
+
2338
+static struct drm_dp_mst_port *
2339
+drm_dp_mst_add_port(struct drm_device *dev,
2340
+ struct drm_dp_mst_topology_mgr *mgr,
2341
+ struct drm_dp_mst_branch *mstb, u8 port_number)
2342
+{
2343
+ struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2344
+
2345
+ if (!port)
2346
+ return NULL;
2347
+
2348
+ kref_init(&port->topology_kref);
2349
+ kref_init(&port->malloc_kref);
2350
+ port->parent = mstb;
2351
+ port->port_num = port_number;
2352
+ port->mgr = mgr;
2353
+ port->aux.name = "DPMST";
2354
+ port->aux.dev = dev->dev;
2355
+ port->aux.is_remote = true;
2356
+
2357
+ /* initialize the MST downstream port's AUX crc work queue */
2358
+ drm_dp_remote_aux_init(&port->aux);
2359
+
2360
+ /*
2361
+ * Make sure the memory allocation for our parent branch stays
2362
+ * around until our own memory allocation is released
2363
+ */
2364
+ drm_dp_mst_get_mstb_malloc(mstb);
2365
+
2366
+ return port;
2367
+}
2368
+
2369
+static int
2370
+drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2371
+ struct drm_device *dev,
2372
+ struct drm_dp_link_addr_reply_port *port_msg)
2373
+{
2374
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2375
+ struct drm_dp_mst_port *port;
2376
+ int old_ddps = 0, ret;
2377
+ u8 new_pdt = DP_PEER_DEVICE_NONE;
2378
+ bool new_mcs = 0;
2379
+ bool created = false, send_link_addr = false, changed = false;
2380
+
2381
+ port = drm_dp_get_port(mstb, port_msg->port_number);
2382
+ if (!port) {
2383
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
2384
+ port_msg->port_number);
2385
+ if (!port)
2386
+ return -ENOMEM;
2387
+ created = true;
2388
+ changed = true;
2389
+ } else if (!port->input && port_msg->input_port && port->connector) {
2390
+ /* Since port->connector can't be changed here, we create a
2391
+ * new port if input_port changes from 0 to 1
2392
+ */
2393
+ drm_dp_mst_topology_unlink_port(mgr, port);
2394
+ drm_dp_mst_topology_put_port(port);
2395
+ port = drm_dp_mst_add_port(dev, mgr, mstb,
2396
+ port_msg->port_number);
2397
+ if (!port)
2398
+ return -ENOMEM;
2399
+ changed = true;
2400
+ created = true;
2401
+ } else if (port->input && !port_msg->input_port) {
2402
+ changed = true;
2403
+ } else if (port->connector) {
2404
+ /* We're updating a port that's exposed to userspace, so do it
2405
+ * under lock
2406
+ */
2407
+ drm_modeset_lock(&mgr->base.lock, NULL);
2408
+
2409
+ old_ddps = port->ddps;
2410
+ changed = port->ddps != port_msg->ddps ||
2411
+ (port->ddps &&
2412
+ (port->ldps != port_msg->legacy_device_plug_status ||
2413
+ port->dpcd_rev != port_msg->dpcd_revision ||
2414
+ port->mcs != port_msg->mcs ||
2415
+ port->pdt != port_msg->peer_device_type ||
2416
+ port->num_sdp_stream_sinks !=
2417
+ port_msg->num_sdp_stream_sinks));
2418
+ }
2419
+
11652420 port->input = port_msg->input_port;
1166
- port->mcs = port_msg->mcs;
2421
+ if (!port->input)
2422
+ new_pdt = port_msg->peer_device_type;
2423
+ new_mcs = port_msg->mcs;
11672424 port->ddps = port_msg->ddps;
11682425 port->ldps = port_msg->legacy_device_plug_status;
11692426 port->dpcd_rev = port_msg->dpcd_revision;
....@@ -1173,93 +2430,141 @@
11732430 /* manage mstb port lists with mgr lock - take a reference
11742431 for this list */
11752432 if (created) {
1176
- mutex_lock(&mstb->mgr->lock);
1177
- kref_get(&port->kref);
2433
+ mutex_lock(&mgr->lock);
2434
+ drm_dp_mst_topology_get_port(port);
11782435 list_add(&port->next, &mstb->ports);
1179
- mutex_unlock(&mstb->mgr->lock);
2436
+ mstb->num_ports++;
2437
+ mutex_unlock(&mgr->lock);
11802438 }
11812439
1182
- if (old_ddps != port->ddps) {
1183
- if (port->ddps) {
1184
- if (!port->input)
1185
- drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
2440
+ /*
2441
+ * Reprobe PBN caps on both hotplug, and when re-probing the link
2442
+ * for our parent mstb
2443
+ */
2444
+ if (old_ddps != port->ddps || !created) {
2445
+ if (port->ddps && !port->input) {
2446
+ ret = drm_dp_send_enum_path_resources(mgr, mstb,
2447
+ port);
2448
+ if (ret == 1)
2449
+ changed = true;
11862450 } else {
1187
- port->available_pbn = 0;
1188
- }
1189
- }
1190
-
1191
- if (old_pdt != port->pdt && !port->input) {
1192
- drm_dp_port_teardown_pdt(port, old_pdt);
1193
-
1194
- ret = drm_dp_port_setup_pdt(port);
1195
- if (ret == true)
1196
- drm_dp_send_link_address(mstb->mgr, port->mstb);
1197
- }
1198
-
1199
- if (created && !port->input) {
1200
- char proppath[255];
1201
-
1202
- build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
1203
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1204
- if (!port->connector) {
1205
- /* remove it from the port list */
1206
- mutex_lock(&mstb->mgr->lock);
1207
- list_del(&port->next);
1208
- mutex_unlock(&mstb->mgr->lock);
1209
- /* drop port list reference */
1210
- drm_dp_put_port(port);
1211
- goto out;
2451
+ port->full_pbn = 0;
12122452 }
1213
- if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1214
- port->pdt == DP_PEER_DEVICE_SST_SINK) &&
1215
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
1216
- port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1217
- drm_connector_set_tile_property(port->connector);
1218
- }
1219
- (*mstb->mgr->cbs->register_connector)(port->connector);
12202453 }
12212454
1222
-out:
2455
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2456
+ if (ret == 1) {
2457
+ send_link_addr = true;
2458
+ } else if (ret < 0) {
2459
+ DRM_ERROR("Failed to change PDT on port %p: %d\n",
2460
+ port, ret);
2461
+ goto fail;
2462
+ }
2463
+
2464
+ /*
2465
+ * If this port wasn't just created, then we're reprobing because
2466
+ * we're coming out of suspend. In this case, always resend the link
2467
+ * address if there's an MSTB on this port
2468
+ */
2469
+ if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2470
+ port->mcs)
2471
+ send_link_addr = true;
2472
+
2473
+ if (port->connector)
2474
+ drm_modeset_unlock(&mgr->base.lock);
2475
+ else if (!port->input)
2476
+ drm_dp_mst_port_add_connector(mstb, port);
2477
+
2478
+ if (send_link_addr && port->mstb) {
2479
+ ret = drm_dp_send_link_address(mgr, port->mstb);
2480
+ if (ret == 1) /* MSTB below us changed */
2481
+ changed = true;
2482
+ else if (ret < 0)
2483
+ goto fail_put;
2484
+ }
2485
+
12232486 /* put reference to this port */
1224
- drm_dp_put_port(port);
2487
+ drm_dp_mst_topology_put_port(port);
2488
+ return changed;
2489
+
2490
+fail:
2491
+ drm_dp_mst_topology_unlink_port(mgr, port);
2492
+ if (port->connector)
2493
+ drm_modeset_unlock(&mgr->base.lock);
2494
+fail_put:
2495
+ drm_dp_mst_topology_put_port(port);
2496
+ return ret;
12252497 }
12262498
1227
-static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1228
- struct drm_dp_connection_status_notify *conn_stat)
2499
+static void
2500
+drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2501
+ struct drm_dp_connection_status_notify *conn_stat)
12292502 {
2503
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
12302504 struct drm_dp_mst_port *port;
1231
- int old_pdt;
1232
- int old_ddps;
1233
- bool dowork = false;
2505
+ int old_ddps, ret;
2506
+ u8 new_pdt;
2507
+ bool new_mcs;
2508
+ bool dowork = false, create_connector = false;
2509
+
12342510 port = drm_dp_get_port(mstb, conn_stat->port_number);
12352511 if (!port)
12362512 return;
12372513
2514
+ if (port->connector) {
2515
+ if (!port->input && conn_stat->input_port) {
2516
+ /*
2517
+ * We can't remove a connector from an already exposed
2518
+ * port, so just throw the port out and make sure we
2519
+ * reprobe the link address of it's parent MSTB
2520
+ */
2521
+ drm_dp_mst_topology_unlink_port(mgr, port);
2522
+ mstb->link_address_sent = false;
2523
+ dowork = true;
2524
+ goto out;
2525
+ }
2526
+
2527
+ /* Locking is only needed if the port's exposed to userspace */
2528
+ drm_modeset_lock(&mgr->base.lock, NULL);
2529
+ } else if (port->input && !conn_stat->input_port) {
2530
+ create_connector = true;
2531
+ /* Reprobe link address so we get num_sdp_streams */
2532
+ mstb->link_address_sent = false;
2533
+ dowork = true;
2534
+ }
2535
+
12382536 old_ddps = port->ddps;
1239
- old_pdt = port->pdt;
1240
- port->pdt = conn_stat->peer_device_type;
1241
- port->mcs = conn_stat->message_capability_status;
2537
+ port->input = conn_stat->input_port;
12422538 port->ldps = conn_stat->legacy_device_plug_status;
12432539 port->ddps = conn_stat->displayport_device_plug_status;
12442540
12452541 if (old_ddps != port->ddps) {
1246
- if (port->ddps) {
1247
- dowork = true;
1248
- } else {
1249
- port->available_pbn = 0;
1250
- }
1251
- }
1252
- if (old_pdt != port->pdt && !port->input) {
1253
- drm_dp_port_teardown_pdt(port, old_pdt);
1254
-
1255
- if (drm_dp_port_setup_pdt(port))
1256
- dowork = true;
2542
+ if (port->ddps && !port->input)
2543
+ drm_dp_send_enum_path_resources(mgr, mstb, port);
2544
+ else
2545
+ port->full_pbn = 0;
12572546 }
12582547
1259
- drm_dp_put_port(port);
2548
+ new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2549
+ new_mcs = conn_stat->message_capability_status;
2550
+ ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2551
+ if (ret == 1) {
2552
+ dowork = true;
2553
+ } else if (ret < 0) {
2554
+ DRM_ERROR("Failed to change PDT for port %p: %d\n",
2555
+ port, ret);
2556
+ dowork = false;
2557
+ }
2558
+
2559
+ if (port->connector)
2560
+ drm_modeset_unlock(&mgr->base.lock);
2561
+ else if (create_connector)
2562
+ drm_dp_mst_port_add_connector(mstb, port);
2563
+
2564
+out:
2565
+ drm_dp_mst_topology_put_port(port);
12602566 if (dowork)
12612567 queue_work(system_long_wq, &mstb->mgr->work);
1262
-
12632568 }
12642569
12652570 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
....@@ -1267,7 +2572,7 @@
12672572 {
12682573 struct drm_dp_mst_branch *mstb;
12692574 struct drm_dp_mst_port *port;
1270
- int i;
2575
+ int i, ret;
12712576 /* find the port by iterating down */
12722577
12732578 mutex_lock(&mgr->lock);
....@@ -1292,7 +2597,9 @@
12922597 }
12932598 }
12942599 }
1295
- kref_get(&mstb->kref);
2600
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
2601
+ if (!ret)
2602
+ mstb = NULL;
12962603 out:
12972604 mutex_unlock(&mgr->lock);
12982605 return mstb;
....@@ -1300,7 +2607,7 @@
13002607
13012608 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
13022609 struct drm_dp_mst_branch *mstb,
1303
- uint8_t *guid)
2610
+ const uint8_t *guid)
13042611 {
13052612 struct drm_dp_mst_branch *found_mstb;
13062613 struct drm_dp_mst_port *port;
....@@ -1322,67 +2629,112 @@
13222629 return NULL;
13232630 }
13242631
1325
-static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
1326
- struct drm_dp_mst_topology_mgr *mgr,
1327
- uint8_t *guid)
2632
+static struct drm_dp_mst_branch *
2633
+drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2634
+ const uint8_t *guid)
13282635 {
13292636 struct drm_dp_mst_branch *mstb;
2637
+ int ret;
13302638
13312639 /* find the port by iterating down */
13322640 mutex_lock(&mgr->lock);
13332641
13342642 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1335
-
1336
- if (mstb)
1337
- kref_get(&mstb->kref);
2643
+ if (mstb) {
2644
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
2645
+ if (!ret)
2646
+ mstb = NULL;
2647
+ }
13382648
13392649 mutex_unlock(&mgr->lock);
13402650 return mstb;
13412651 }
13422652
1343
-static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2653
+static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
13442654 struct drm_dp_mst_branch *mstb)
13452655 {
13462656 struct drm_dp_mst_port *port;
1347
- struct drm_dp_mst_branch *mstb_child;
1348
- if (!mstb->link_address_sent)
1349
- drm_dp_send_link_address(mgr, mstb);
2657
+ int ret;
2658
+ bool changed = false;
2659
+
2660
+ if (!mstb->link_address_sent) {
2661
+ ret = drm_dp_send_link_address(mgr, mstb);
2662
+ if (ret == 1)
2663
+ changed = true;
2664
+ else if (ret < 0)
2665
+ return ret;
2666
+ }
13502667
13512668 list_for_each_entry(port, &mstb->ports, next) {
1352
- if (port->input)
2669
+ struct drm_dp_mst_branch *mstb_child = NULL;
2670
+
2671
+ if (port->input || !port->ddps)
13532672 continue;
13542673
1355
- if (!port->ddps)
1356
- continue;
2674
+ if (port->mstb)
2675
+ mstb_child = drm_dp_mst_topology_get_mstb_validated(
2676
+ mgr, port->mstb);
13572677
1358
- if (!port->available_pbn)
1359
- drm_dp_send_enum_path_resources(mgr, mstb, port);
1360
-
1361
- if (port->mstb) {
1362
- mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
1363
- if (mstb_child) {
1364
- drm_dp_check_and_send_link_address(mgr, mstb_child);
1365
- drm_dp_put_mst_branch_device(mstb_child);
1366
- }
2678
+ if (mstb_child) {
2679
+ ret = drm_dp_check_and_send_link_address(mgr,
2680
+ mstb_child);
2681
+ drm_dp_mst_topology_put_mstb(mstb_child);
2682
+ if (ret == 1)
2683
+ changed = true;
2684
+ else if (ret < 0)
2685
+ return ret;
13672686 }
13682687 }
2688
+
2689
+ return changed;
13692690 }
13702691
13712692 static void drm_dp_mst_link_probe_work(struct work_struct *work)
13722693 {
1373
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
2694
+ struct drm_dp_mst_topology_mgr *mgr =
2695
+ container_of(work, struct drm_dp_mst_topology_mgr, work);
2696
+ struct drm_device *dev = mgr->dev;
13742697 struct drm_dp_mst_branch *mstb;
2698
+ int ret;
2699
+ bool clear_payload_id_table;
2700
+
2701
+ mutex_lock(&mgr->probe_lock);
13752702
13762703 mutex_lock(&mgr->lock);
2704
+ clear_payload_id_table = !mgr->payload_id_table_cleared;
2705
+ mgr->payload_id_table_cleared = true;
2706
+
13772707 mstb = mgr->mst_primary;
13782708 if (mstb) {
1379
- kref_get(&mstb->kref);
2709
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
2710
+ if (!ret)
2711
+ mstb = NULL;
13802712 }
13812713 mutex_unlock(&mgr->lock);
1382
- if (mstb) {
1383
- drm_dp_check_and_send_link_address(mgr, mstb);
1384
- drm_dp_put_mst_branch_device(mstb);
2714
+ if (!mstb) {
2715
+ mutex_unlock(&mgr->probe_lock);
2716
+ return;
13852717 }
2718
+
2719
+ /*
2720
+ * Certain branch devices seem to incorrectly report an available_pbn
2721
+ * of 0 on downstream sinks, even after clearing the
2722
+ * DP_PAYLOAD_ALLOCATE_* registers in
2723
+ * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2724
+ * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2725
+ * things work again.
2726
+ */
2727
+ if (clear_payload_id_table) {
2728
+ DRM_DEBUG_KMS("Clearing payload ID table\n");
2729
+ drm_dp_send_clear_payload_id_table(mgr, mstb);
2730
+ }
2731
+
2732
+ ret = drm_dp_check_and_send_link_address(mgr, mstb);
2733
+ drm_dp_mst_topology_put_mstb(mstb);
2734
+
2735
+ mutex_unlock(&mgr->probe_lock);
2736
+ if (ret)
2737
+ drm_kms_helper_hotplug_event(dev);
13862738 }
13872739
13882740 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
....@@ -1401,7 +2753,8 @@
14012753 return false;
14022754 }
14032755
1404
-static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2756
+static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
2757
+ u8 port_num, u32 offset, u8 num_bytes)
14052758 {
14062759 struct drm_dp_sideband_msg_req_body req;
14072760
....@@ -1410,8 +2763,6 @@
14102763 req.u.dpcd_read.dpcd_address = offset;
14112764 req.u.dpcd_read.num_bytes = num_bytes;
14122765 drm_dp_encode_sideband_req(&req, msg);
1413
-
1414
- return 0;
14152766 }
14162767
14172768 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
....@@ -1452,34 +2803,24 @@
14522803 struct drm_dp_mst_branch *mstb = txmsg->dst;
14532804 u8 req_type;
14542805
1455
- /* both msg slots are full */
1456
- if (txmsg->seqno == -1) {
1457
- if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
1458
- DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
1459
- return -EAGAIN;
1460
- }
1461
- if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
1462
- txmsg->seqno = mstb->last_seqno;
1463
- mstb->last_seqno ^= 1;
1464
- } else if (mstb->tx_slots[0] == NULL)
1465
- txmsg->seqno = 0;
1466
- else
1467
- txmsg->seqno = 1;
1468
- mstb->tx_slots[txmsg->seqno] = txmsg;
1469
- }
1470
-
14712806 req_type = txmsg->msg[0] & 0x7f;
14722807 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
1473
- req_type == DP_RESOURCE_STATUS_NOTIFY)
2808
+ req_type == DP_RESOURCE_STATUS_NOTIFY ||
2809
+ req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
14742810 hdr->broadcast = 1;
14752811 else
14762812 hdr->broadcast = 0;
14772813 hdr->path_msg = txmsg->path_msg;
1478
- hdr->lct = mstb->lct;
1479
- hdr->lcr = mstb->lct - 1;
1480
- if (mstb->lct > 1)
1481
- memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
1482
- hdr->seqno = txmsg->seqno;
2814
+ if (hdr->broadcast) {
2815
+ hdr->lct = 1;
2816
+ hdr->lcr = 6;
2817
+ } else {
2818
+ hdr->lct = mstb->lct;
2819
+ hdr->lcr = mstb->lct - 1;
2820
+ }
2821
+
2822
+ memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
2823
+
14832824 return 0;
14842825 }
14852826 /*
....@@ -1494,15 +2835,15 @@
14942835 int len, space, idx, tosend;
14952836 int ret;
14962837
2838
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2839
+ return 0;
2840
+
14972841 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
14982842
1499
- if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
1500
- txmsg->seqno = -1;
2843
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
15012844 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
1502
- }
15032845
1504
- /* make hdr from dst mst - for replies use seqno
1505
- otherwise assign one */
2846
+ /* make hdr from dst mst */
15062847 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
15072848 if (ret < 0)
15082849 return ret;
....@@ -1529,7 +2870,12 @@
15292870
15302871 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
15312872 if (ret) {
1532
- DRM_DEBUG_KMS("sideband msg failed to send\n");
2873
+ if (drm_debug_enabled(DRM_UT_DP)) {
2874
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2875
+
2876
+ drm_printf(&p, "sideband msg failed to send\n");
2877
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2878
+ }
15332879 return ret;
15342880 }
15352881
....@@ -1552,37 +2898,14 @@
15522898 if (list_empty(&mgr->tx_msg_downq))
15532899 return;
15542900
1555
- txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2901
+ txmsg = list_first_entry(&mgr->tx_msg_downq,
2902
+ struct drm_dp_sideband_msg_tx, next);
15562903 ret = process_single_tx_qlock(mgr, txmsg, false);
1557
- if (ret == 1) {
1558
- /* txmsg is sent it should be in the slots now */
1559
- list_del(&txmsg->next);
1560
- } else if (ret) {
2904
+ if (ret < 0) {
15612905 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
15622906 list_del(&txmsg->next);
1563
- if (txmsg->seqno != -1)
1564
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
15652907 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
15662908 wake_up_all(&mgr->tx_waitq);
1567
- }
1568
-}
1569
-
1570
-/* called holding qlock */
1571
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1572
- struct drm_dp_sideband_msg_tx *txmsg)
1573
-{
1574
- int ret;
1575
-
1576
- /* construct a chunk from the first msg in the tx_msg queue */
1577
- ret = process_single_tx_qlock(mgr, txmsg, true);
1578
-
1579
- if (ret != 1)
1580
- DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1581
-
1582
- if (txmsg->seqno != -1) {
1583
- WARN_ON((unsigned int)txmsg->seqno >
1584
- ARRAY_SIZE(txmsg->dst->tx_slots));
1585
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
15862909 }
15872910 }
15882911
....@@ -1591,15 +2914,124 @@
15912914 {
15922915 mutex_lock(&mgr->qlock);
15932916 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2917
+
2918
+ if (drm_debug_enabled(DRM_UT_DP)) {
2919
+ struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2920
+
2921
+ drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2922
+ }
2923
+
15942924 if (list_is_singular(&mgr->tx_msg_downq))
15952925 process_single_down_tx_qlock(mgr);
15962926 mutex_unlock(&mgr->qlock);
15972927 }
15982928
1599
-static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2929
+static void
2930
+drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2931
+{
2932
+ struct drm_dp_link_addr_reply_port *port_reply;
2933
+ int i;
2934
+
2935
+ for (i = 0; i < reply->nports; i++) {
2936
+ port_reply = &reply->ports[i];
2937
+ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2938
+ i,
2939
+ port_reply->input_port,
2940
+ port_reply->peer_device_type,
2941
+ port_reply->port_number,
2942
+ port_reply->dpcd_revision,
2943
+ port_reply->mcs,
2944
+ port_reply->ddps,
2945
+ port_reply->legacy_device_plug_status,
2946
+ port_reply->num_sdp_streams,
2947
+ port_reply->num_sdp_stream_sinks);
2948
+ }
2949
+}
2950
+
2951
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
16002952 struct drm_dp_mst_branch *mstb)
16012953 {
1602
- int len;
2954
+ struct drm_dp_sideband_msg_tx *txmsg;
2955
+ struct drm_dp_link_address_ack_reply *reply;
2956
+ struct drm_dp_mst_port *port, *tmp;
2957
+ int i, ret, port_mask = 0;
2958
+ bool changed = false;
2959
+
2960
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2961
+ if (!txmsg)
2962
+ return -ENOMEM;
2963
+
2964
+ txmsg->dst = mstb;
2965
+ build_link_address(txmsg);
2966
+
2967
+ mstb->link_address_sent = true;
2968
+ drm_dp_queue_down_tx(mgr, txmsg);
2969
+
2970
+ /* FIXME: Actually do some real error handling here */
2971
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2972
+ if (ret <= 0) {
2973
+ DRM_ERROR("Sending link address failed with %d\n", ret);
2974
+ goto out;
2975
+ }
2976
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2977
+ DRM_ERROR("link address NAK received\n");
2978
+ ret = -EIO;
2979
+ goto out;
2980
+ }
2981
+
2982
+ reply = &txmsg->reply.u.link_addr;
2983
+ DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2984
+ drm_dp_dump_link_address(reply);
2985
+
2986
+ ret = drm_dp_check_mstb_guid(mstb, reply->guid);
2987
+ if (ret) {
2988
+ char buf[64];
2989
+
2990
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
2991
+ DRM_ERROR("GUID check on %s failed: %d\n",
2992
+ buf, ret);
2993
+ goto out;
2994
+ }
2995
+
2996
+ for (i = 0; i < reply->nports; i++) {
2997
+ port_mask |= BIT(reply->ports[i].port_number);
2998
+ ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2999
+ &reply->ports[i]);
3000
+ if (ret == 1)
3001
+ changed = true;
3002
+ else if (ret < 0)
3003
+ goto out;
3004
+ }
3005
+
3006
+ /* Prune any ports that are currently a part of mstb in our in-memory
3007
+ * topology, but were not seen in this link address. Usually this
3008
+ * means that they were removed while the topology was out of sync,
3009
+ * e.g. during suspend/resume
3010
+ */
3011
+ mutex_lock(&mgr->lock);
3012
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
3013
+ if (port_mask & BIT(port->port_num))
3014
+ continue;
3015
+
3016
+ DRM_DEBUG_KMS("port %d was not in link address, removing\n",
3017
+ port->port_num);
3018
+ list_del(&port->next);
3019
+ drm_dp_mst_topology_put_port(port);
3020
+ changed = true;
3021
+ }
3022
+ mutex_unlock(&mgr->lock);
3023
+
3024
+out:
3025
+ if (ret <= 0)
3026
+ mstb->link_address_sent = false;
3027
+ kfree(txmsg);
3028
+ return ret < 0 ? ret : changed;
3029
+}
3030
+
3031
+static void
3032
+drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
3033
+ struct drm_dp_mst_branch *mstb)
3034
+{
16033035 struct drm_dp_sideband_msg_tx *txmsg;
16043036 int ret;
16053037
....@@ -1608,52 +3040,23 @@
16083040 return;
16093041
16103042 txmsg->dst = mstb;
1611
- len = build_link_address(txmsg);
3043
+ build_clear_payload_id_table(txmsg);
16123044
1613
- mstb->link_address_sent = true;
16143045 drm_dp_queue_down_tx(mgr, txmsg);
16153046
16163047 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1617
- if (ret > 0) {
1618
- int i;
1619
-
1620
- if (txmsg->reply.reply_type == 1)
1621
- DRM_DEBUG_KMS("link address nak received\n");
1622
- else {
1623
- DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
1624
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1625
- DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
1626
- txmsg->reply.u.link_addr.ports[i].input_port,
1627
- txmsg->reply.u.link_addr.ports[i].peer_device_type,
1628
- txmsg->reply.u.link_addr.ports[i].port_number,
1629
- txmsg->reply.u.link_addr.ports[i].dpcd_revision,
1630
- txmsg->reply.u.link_addr.ports[i].mcs,
1631
- txmsg->reply.u.link_addr.ports[i].ddps,
1632
- txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
1633
- txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1634
- txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
1635
- }
1636
-
1637
- drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
1638
-
1639
- for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1640
- drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1641
- }
1642
- (*mgr->cbs->hotplug)(mgr);
1643
- }
1644
- } else {
1645
- mstb->link_address_sent = false;
1646
- DRM_DEBUG_KMS("link address failed %d\n", ret);
1647
- }
3048
+ if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3049
+ DRM_DEBUG_KMS("clear payload table id nak received\n");
16483050
16493051 kfree(txmsg);
16503052 }
16513053
1652
-static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1653
- struct drm_dp_mst_branch *mstb,
1654
- struct drm_dp_mst_port *port)
3054
+static int
3055
+drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3056
+ struct drm_dp_mst_branch *mstb,
3057
+ struct drm_dp_mst_port *port)
16553058 {
1656
- int len;
3059
+ struct drm_dp_enum_path_resources_ack_reply *path_res;
16573060 struct drm_dp_sideband_msg_tx *txmsg;
16583061 int ret;
16593062
....@@ -1662,26 +3065,41 @@
16623065 return -ENOMEM;
16633066
16643067 txmsg->dst = mstb;
1665
- len = build_enum_path_resources(txmsg, port->port_num);
3068
+ build_enum_path_resources(txmsg, port->port_num);
16663069
16673070 drm_dp_queue_down_tx(mgr, txmsg);
16683071
16693072 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
16703073 if (ret > 0) {
1671
- if (txmsg->reply.reply_type == 1)
3074
+ ret = 0;
3075
+ path_res = &txmsg->reply.u.path_resources;
3076
+
3077
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
16723078 DRM_DEBUG_KMS("enum path resources nak received\n");
1673
- else {
1674
- if (port->port_num != txmsg->reply.u.path_resources.port_number)
3079
+ } else {
3080
+ if (port->port_num != path_res->port_number)
16753081 DRM_ERROR("got incorrect port in response\n");
1676
- DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
1677
- txmsg->reply.u.path_resources.avail_payload_bw_number);
1678
- port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
1679
- port->fec_capable = txmsg->reply.u.path_resources.fec_capable;
3082
+
3083
+ DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
3084
+ path_res->port_number,
3085
+ path_res->full_payload_bw_number,
3086
+ path_res->avail_payload_bw_number);
3087
+
3088
+ /*
3089
+ * If something changed, make sure we send a
3090
+ * hotplug
3091
+ */
3092
+ if (port->full_pbn != path_res->full_payload_bw_number ||
3093
+ port->fec_capable != path_res->fec_capable)
3094
+ ret = 1;
3095
+
3096
+ port->full_pbn = path_res->full_payload_bw_number;
3097
+ port->fec_capable = path_res->fec_capable;
16803098 }
16813099 }
16823100
16833101 kfree(txmsg);
1684
- return 0;
3102
+ return ret;
16853103 }
16863104
16873105 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
....@@ -1695,22 +3113,40 @@
16953113 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
16963114 }
16973115
1698
-static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
1699
- struct drm_dp_mst_branch *mstb,
1700
- int *port_num)
3116
+/*
3117
+ * Searches upwards in the topology starting from mstb to try to find the
3118
+ * closest available parent of mstb that's still connected to the rest of the
3119
+ * topology. This can be used in order to perform operations like releasing
3120
+ * payloads, where the branch device which owned the payload may no longer be
3121
+ * around and thus would require that the payload on the last living relative
3122
+ * be freed instead.
3123
+ */
3124
+static struct drm_dp_mst_branch *
3125
+drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3126
+ struct drm_dp_mst_branch *mstb,
3127
+ int *port_num)
17013128 {
17023129 struct drm_dp_mst_branch *rmstb = NULL;
17033130 struct drm_dp_mst_port *found_port;
1704
- mutex_lock(&mgr->lock);
1705
- if (mgr->mst_primary) {
1706
- found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
17073131
1708
- if (found_port) {
3132
+ mutex_lock(&mgr->lock);
3133
+ if (!mgr->mst_primary)
3134
+ goto out;
3135
+
3136
+ do {
3137
+ found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3138
+ if (!found_port)
3139
+ break;
3140
+
3141
+ if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
17093142 rmstb = found_port->parent;
1710
- kref_get(&rmstb->kref);
17113143 *port_num = found_port->port_num;
3144
+ } else {
3145
+ /* Search again, starting from this parent */
3146
+ mstb = found_port->parent;
17123147 }
1713
- }
3148
+ } while (!rmstb);
3149
+out:
17143150 mutex_unlock(&mgr->lock);
17153151 return rmstb;
17163152 }
....@@ -1722,23 +3158,19 @@
17223158 {
17233159 struct drm_dp_sideband_msg_tx *txmsg;
17243160 struct drm_dp_mst_branch *mstb;
1725
- int len, ret, port_num;
3161
+ int ret, port_num;
17263162 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
17273163 int i;
17283164
1729
- port = drm_dp_get_validated_port_ref(mgr, port);
1730
- if (!port)
1731
- return -EINVAL;
1732
-
17333165 port_num = port->port_num;
1734
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3166
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
17353167 if (!mstb) {
1736
- mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
3168
+ mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3169
+ port->parent,
3170
+ &port_num);
17373171
1738
- if (!mstb) {
1739
- drm_dp_put_port(port);
3172
+ if (!mstb)
17403173 return -EINVAL;
1741
- }
17423174 }
17433175
17443176 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
....@@ -1751,23 +3183,30 @@
17513183 sinks[i] = i;
17523184
17533185 txmsg->dst = mstb;
1754
- len = build_allocate_payload(txmsg, port_num,
1755
- id,
1756
- pbn, port->num_sdp_streams, sinks);
3186
+ build_allocate_payload(txmsg, port_num,
3187
+ id,
3188
+ pbn, port->num_sdp_streams, sinks);
17573189
17583190 drm_dp_queue_down_tx(mgr, txmsg);
17593191
3192
+ /*
3193
+ * FIXME: there is a small chance that between getting the last
3194
+ * connected mstb and sending the payload message, the last connected
3195
+ * mstb could also be removed from the topology. In the future, this
3196
+ * needs to be fixed by restarting the
3197
+ * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3198
+ * timeout if the topology is still connected to the system.
3199
+ */
17603200 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
17613201 if (ret > 0) {
1762
- if (txmsg->reply.reply_type == 1) {
3202
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
17633203 ret = -EINVAL;
1764
- } else
3204
+ else
17653205 ret = 0;
17663206 }
17673207 kfree(txmsg);
17683208 fail_put:
1769
- drm_dp_put_mst_branch_device(mstb);
1770
- drm_dp_put_port(port);
3209
+ drm_dp_mst_topology_put_mstb(mstb);
17713210 return ret;
17723211 }
17733212
....@@ -1775,71 +3214,86 @@
17753214 struct drm_dp_mst_port *port, bool power_up)
17763215 {
17773216 struct drm_dp_sideband_msg_tx *txmsg;
1778
- int len, ret;
3217
+ int ret;
17793218
1780
- port = drm_dp_get_validated_port_ref(mgr, port);
3219
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
17813220 if (!port)
17823221 return -EINVAL;
17833222
17843223 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
17853224 if (!txmsg) {
1786
- drm_dp_put_port(port);
3225
+ drm_dp_mst_topology_put_port(port);
17873226 return -ENOMEM;
17883227 }
17893228
17903229 txmsg->dst = port->parent;
1791
- len = build_power_updown_phy(txmsg, port->port_num, power_up);
3230
+ build_power_updown_phy(txmsg, port->port_num, power_up);
17923231 drm_dp_queue_down_tx(mgr, txmsg);
17933232
17943233 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
17953234 if (ret > 0) {
1796
- if (txmsg->reply.reply_type == 1)
3235
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
17973236 ret = -EINVAL;
17983237 else
17993238 ret = 0;
18003239 }
18013240 kfree(txmsg);
1802
- drm_dp_put_port(port);
3241
+ drm_dp_mst_topology_put_port(port);
18033242
18043243 return ret;
18053244 }
18063245 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
18073246
1808
-int drm_dp_mst_get_dsc_info(struct drm_dp_mst_topology_mgr *mgr,
3247
+int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
18093248 struct drm_dp_mst_port *port,
1810
- struct drm_dp_mst_dsc_info *dsc_info)
3249
+ struct drm_dp_query_stream_enc_status_ack_reply *status)
18113250 {
1812
- if (!dsc_info)
1813
- return -EINVAL;
3251
+ struct drm_dp_sideband_msg_tx *txmsg;
3252
+ u8 nonce[7];
3253
+ int len, ret;
18143254
1815
- port = drm_dp_get_validated_port_ref(mgr, port);
1816
- if (!port)
1817
- return -EINVAL;
3255
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3256
+ if (!txmsg)
3257
+ return -ENOMEM;
18183258
1819
- memcpy(dsc_info, &port->dsc_info, sizeof(struct drm_dp_mst_dsc_info));
1820
- drm_dp_put_port(port);
3259
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
3260
+ if (!port) {
3261
+ ret = -EINVAL;
3262
+ goto out_get_port;
3263
+ }
18213264
1822
- return 0;
3265
+ get_random_bytes(nonce, sizeof(nonce));
3266
+
3267
+ /*
3268
+ * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
3269
+ * transaction at the MST Branch device directly connected to the
3270
+ * Source"
3271
+ */
3272
+ txmsg->dst = mgr->mst_primary;
3273
+
3274
+ len = build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce);
3275
+
3276
+ drm_dp_queue_down_tx(mgr, txmsg);
3277
+
3278
+ ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3279
+ if (ret < 0) {
3280
+ goto out;
3281
+ } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3282
+ drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
3283
+ ret = -ENXIO;
3284
+ goto out;
3285
+ }
3286
+
3287
+ ret = 0;
3288
+ memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3289
+
3290
+out:
3291
+ drm_dp_mst_topology_put_port(port);
3292
+out_get_port:
3293
+ kfree(txmsg);
3294
+ return ret;
18233295 }
1824
-EXPORT_SYMBOL_GPL(drm_dp_mst_get_dsc_info);
1825
-
1826
-int drm_dp_mst_update_dsc_info(struct drm_dp_mst_topology_mgr *mgr,
1827
- struct drm_dp_mst_port *port,
1828
- struct drm_dp_mst_dsc_info *dsc_info)
1829
-{
1830
- if (!dsc_info)
1831
- return -EINVAL;
1832
-
1833
- port = drm_dp_get_validated_port_ref(mgr, port);
1834
- if (!port)
1835
- return -EINVAL;
1836
-
1837
- memcpy(&port->dsc_info, dsc_info, sizeof(struct drm_dp_mst_dsc_info));
1838
- drm_dp_put_port(port);
1839
-
1840
- return 0;
1841
-}
1842
-EXPORT_SYMBOL_GPL(drm_dp_mst_update_dsc_info);
3296
+EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
18433297
18443298 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
18453299 int id,
....@@ -1862,6 +3316,7 @@
18623316 struct drm_dp_payload *payload)
18633317 {
18643318 int ret;
3319
+
18653320 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
18663321 if (ret < 0)
18673322 return ret;
....@@ -1875,7 +3330,7 @@
18753330 struct drm_dp_payload *payload)
18763331 {
18773332 DRM_DEBUG_KMS("\n");
1878
- /* its okay for these to fail */
3333
+ /* it's okay for these to fail */
18793334 if (port) {
18803335 drm_dp_payload_send_msg(mgr, port, id, 0);
18813336 }
....@@ -1908,72 +3363,114 @@
19083363 */
19093364 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
19103365 {
1911
- int i, j;
1912
- int cur_slots = 1;
19133366 struct drm_dp_payload req_payload;
19143367 struct drm_dp_mst_port *port;
3368
+ int i, j;
3369
+ int cur_slots = 1;
3370
+ bool skip;
19153371
19163372 mutex_lock(&mgr->payload_lock);
19173373 for (i = 0; i < mgr->max_payloads; i++) {
3374
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3375
+ struct drm_dp_payload *payload = &mgr->payloads[i];
3376
+ bool put_port = false;
3377
+
19183378 /* solve the current payloads - compare to the hw ones
19193379 - update the hw view */
19203380 req_payload.start_slot = cur_slots;
1921
- if (mgr->proposed_vcpis[i]) {
1922
- port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1923
- port = drm_dp_get_validated_port_ref(mgr, port);
1924
- if (!port) {
1925
- mutex_unlock(&mgr->payload_lock);
1926
- return -EINVAL;
3381
+ if (vcpi) {
3382
+ port = container_of(vcpi, struct drm_dp_mst_port,
3383
+ vcpi);
3384
+
3385
+ mutex_lock(&mgr->lock);
3386
+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
3387
+ mutex_unlock(&mgr->lock);
3388
+
3389
+ if (skip) {
3390
+ drm_dbg_kms(mgr->dev,
3391
+ "Virtual channel %d is not in current topology\n",
3392
+ i);
3393
+ continue;
19273394 }
1928
- req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1929
- req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
3395
+ /* Validated ports don't matter if we're releasing
3396
+ * VCPI
3397
+ */
3398
+ if (vcpi->num_slots) {
3399
+ port = drm_dp_mst_topology_get_port_validated(
3400
+ mgr, port);
3401
+ if (!port) {
3402
+ if (vcpi->num_slots == payload->num_slots) {
3403
+ cur_slots += vcpi->num_slots;
3404
+ payload->start_slot = req_payload.start_slot;
3405
+ continue;
3406
+ } else {
3407
+ drm_dbg_kms(mgr->dev,
3408
+ "Fail:set payload to invalid sink");
3409
+ mutex_unlock(&mgr->payload_lock);
3410
+ return -EINVAL;
3411
+ }
3412
+ }
3413
+ put_port = true;
3414
+ }
3415
+
3416
+ req_payload.num_slots = vcpi->num_slots;
3417
+ req_payload.vcpi = vcpi->vcpi;
19303418 } else {
19313419 port = NULL;
19323420 req_payload.num_slots = 0;
19333421 }
19343422
1935
- if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1936
- mgr->payloads[i].start_slot = req_payload.start_slot;
1937
- }
3423
+ payload->start_slot = req_payload.start_slot;
19383424 /* work out what is required to happen with this payload */
1939
- if (mgr->payloads[i].num_slots != req_payload.num_slots) {
3425
+ if (payload->num_slots != req_payload.num_slots) {
19403426
19413427 /* need to push an update for this payload */
19423428 if (req_payload.num_slots) {
1943
- drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
1944
- mgr->payloads[i].num_slots = req_payload.num_slots;
1945
- mgr->payloads[i].vcpi = req_payload.vcpi;
1946
- } else if (mgr->payloads[i].num_slots) {
1947
- mgr->payloads[i].num_slots = 0;
1948
- drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
1949
- req_payload.payload_state = mgr->payloads[i].payload_state;
1950
- mgr->payloads[i].start_slot = 0;
3429
+ drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3430
+ &req_payload);
3431
+ payload->num_slots = req_payload.num_slots;
3432
+ payload->vcpi = req_payload.vcpi;
3433
+
3434
+ } else if (payload->num_slots) {
3435
+ payload->num_slots = 0;
3436
+ drm_dp_destroy_payload_step1(mgr, port,
3437
+ payload->vcpi,
3438
+ payload);
3439
+ req_payload.payload_state =
3440
+ payload->payload_state;
3441
+ payload->start_slot = 0;
19513442 }
1952
- mgr->payloads[i].payload_state = req_payload.payload_state;
3443
+ payload->payload_state = req_payload.payload_state;
19533444 }
19543445 cur_slots += req_payload.num_slots;
19553446
1956
- if (port)
1957
- drm_dp_put_port(port);
3447
+ if (put_port)
3448
+ drm_dp_mst_topology_put_port(port);
19583449 }
19593450
1960
- for (i = 0; i < mgr->max_payloads; i++) {
1961
- if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1962
- DRM_DEBUG_KMS("removing payload %d\n", i);
1963
- for (j = i; j < mgr->max_payloads - 1; j++) {
1964
- memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1965
- mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1966
- if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1967
- set_bit(j + 1, &mgr->payload_mask);
1968
- } else {
1969
- clear_bit(j + 1, &mgr->payload_mask);
1970
- }
1971
- }
1972
- memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1973
- mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1974
- clear_bit(mgr->max_payloads, &mgr->payload_mask);
1975
-
3451
+ for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3452
+ if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3453
+ i++;
3454
+ continue;
19763455 }
3456
+
3457
+ DRM_DEBUG_KMS("removing payload %d\n", i);
3458
+ for (j = i; j < mgr->max_payloads - 1; j++) {
3459
+ mgr->payloads[j] = mgr->payloads[j + 1];
3460
+ mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3461
+
3462
+ if (mgr->proposed_vcpis[j] &&
3463
+ mgr->proposed_vcpis[j]->num_slots) {
3464
+ set_bit(j + 1, &mgr->payload_mask);
3465
+ } else {
3466
+ clear_bit(j + 1, &mgr->payload_mask);
3467
+ }
3468
+ }
3469
+
3470
+ memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3471
+ sizeof(struct drm_dp_payload));
3472
+ mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3473
+ clear_bit(mgr->max_payloads, &mgr->payload_mask);
19773474 }
19783475 mutex_unlock(&mgr->payload_lock);
19793476
....@@ -1995,6 +3492,8 @@
19953492 struct drm_dp_mst_port *port;
19963493 int i;
19973494 int ret = 0;
3495
+ bool skip;
3496
+
19983497 mutex_lock(&mgr->payload_lock);
19993498 for (i = 0; i < mgr->max_payloads; i++) {
20003499
....@@ -2002,6 +3501,13 @@
20023501 continue;
20033502
20043503 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3504
+
3505
+ mutex_lock(&mgr->lock);
3506
+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
3507
+ mutex_unlock(&mgr->lock);
3508
+
3509
+ if (skip)
3510
+ continue;
20053511
20063512 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
20073513 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
....@@ -2019,36 +3525,15 @@
20193525 }
20203526 EXPORT_SYMBOL(drm_dp_update_payload_part2);
20213527
2022
-int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
2023
- struct drm_dp_mst_port *port,
2024
- int offset, int size, u8 *bytes)
3528
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3529
+ struct drm_dp_mst_port *port,
3530
+ int offset, int size, u8 *bytes)
20253531 {
2026
- int len;
2027
- struct drm_dp_sideband_msg_tx *txmsg;
2028
-
2029
- txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2030
- if (!txmsg)
2031
- return -ENOMEM;
2032
-
2033
- len = build_dpcd_read(txmsg, port->port_num, offset, size);
2034
- txmsg->dst = port->parent;
2035
-
2036
- drm_dp_queue_down_tx(mgr, txmsg);
2037
-
2038
- return 0;
2039
-}
2040
-EXPORT_SYMBOL_GPL(drm_dp_send_dpcd_read);
2041
-
2042
-int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
2043
- struct drm_dp_mst_port *port,
2044
- int offset, int size, u8 *bytes)
2045
-{
2046
- int len;
2047
- int ret;
3532
+ int ret = 0;
20483533 struct drm_dp_sideband_msg_tx *txmsg;
20493534 struct drm_dp_mst_branch *mstb;
20503535
2051
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3536
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
20523537 if (!mstb)
20533538 return -EINVAL;
20543539
....@@ -2058,45 +3543,82 @@
20583543 goto fail_put;
20593544 }
20603545
2061
- len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3546
+ build_dpcd_read(txmsg, port->port_num, offset, size);
3547
+ txmsg->dst = port->parent;
3548
+
3549
+ drm_dp_queue_down_tx(mgr, txmsg);
3550
+
3551
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3552
+ if (ret < 0)
3553
+ goto fail_free;
3554
+
3555
+ /* DPCD read should never be NACKed */
3556
+ if (txmsg->reply.reply_type == 1) {
3557
+ DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3558
+ mstb, port->port_num, offset, size);
3559
+ ret = -EIO;
3560
+ goto fail_free;
3561
+ }
3562
+
3563
+ if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3564
+ ret = -EPROTO;
3565
+ goto fail_free;
3566
+ }
3567
+
3568
+ ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3569
+ size);
3570
+ memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3571
+
3572
+fail_free:
3573
+ kfree(txmsg);
3574
+fail_put:
3575
+ drm_dp_mst_topology_put_mstb(mstb);
3576
+
3577
+ return ret;
3578
+}
3579
+
3580
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3581
+ struct drm_dp_mst_port *port,
3582
+ int offset, int size, u8 *bytes)
3583
+{
3584
+ int ret;
3585
+ struct drm_dp_sideband_msg_tx *txmsg;
3586
+ struct drm_dp_mst_branch *mstb;
3587
+
3588
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3589
+ if (!mstb)
3590
+ return -EINVAL;
3591
+
3592
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3593
+ if (!txmsg) {
3594
+ ret = -ENOMEM;
3595
+ goto fail_put;
3596
+ }
3597
+
3598
+ build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
20623599 txmsg->dst = mstb;
20633600
20643601 drm_dp_queue_down_tx(mgr, txmsg);
20653602
20663603 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
20673604 if (ret > 0) {
2068
- if (txmsg->reply.reply_type == 1) {
2069
- ret = -EINVAL;
2070
- } else
2071
- ret = 0;
3605
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3606
+ ret = -EIO;
3607
+ else
3608
+ ret = size;
20723609 }
3610
+
20733611 kfree(txmsg);
20743612 fail_put:
2075
- drm_dp_put_mst_branch_device(mstb);
3613
+ drm_dp_mst_topology_put_mstb(mstb);
20763614 return ret;
20773615 }
2078
-EXPORT_SYMBOL_GPL(drm_dp_send_dpcd_write);
2079
-
2080
-int drm_dp_mst_get_max_sdp_streams_supported(
2081
- struct drm_dp_mst_topology_mgr *mgr,
2082
- struct drm_dp_mst_port *port)
2083
-{
2084
- int ret = -1;
2085
-
2086
- port = drm_dp_get_validated_port_ref(mgr, port);
2087
- if (!port)
2088
- return ret;
2089
- ret = port->num_sdp_streams;
2090
- drm_dp_put_port(port);
2091
- return ret;
2092
-}
2093
-EXPORT_SYMBOL_GPL(drm_dp_mst_get_max_sdp_streams_supported);
20943616
20953617 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
20963618 {
20973619 struct drm_dp_sideband_msg_reply_body reply;
20983620
2099
- reply.reply_type = 0;
3621
+ reply.reply_type = DP_SIDEBAND_REPLY_ACK;
21003622 reply.req_type = req_type;
21013623 drm_dp_encode_sideband_reply(&reply, msg);
21023624 return 0;
....@@ -2104,7 +3626,7 @@
21043626
21053627 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
21063628 struct drm_dp_mst_branch *mstb,
2107
- int req_type, int seqno, bool broadcast)
3629
+ int req_type, bool broadcast)
21083630 {
21093631 struct drm_dp_sideband_msg_tx *txmsg;
21103632
....@@ -2113,44 +3635,59 @@
21133635 return -ENOMEM;
21143636
21153637 txmsg->dst = mstb;
2116
- txmsg->seqno = seqno;
21173638 drm_dp_encode_up_ack_reply(txmsg, req_type);
21183639
21193640 mutex_lock(&mgr->qlock);
2120
-
2121
- process_single_up_tx_qlock(mgr, txmsg);
2122
-
3641
+ /* construct a chunk from the first msg in the tx_msg queue */
3642
+ process_single_tx_qlock(mgr, txmsg, true);
21233643 mutex_unlock(&mgr->qlock);
21243644
21253645 kfree(txmsg);
21263646 return 0;
21273647 }
21283648
2129
-static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
2130
- int dp_link_count,
2131
- int *out)
3649
+/**
3650
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
3651
+ * @link_rate: link rate in 10kbits/s units
3652
+ * @link_lane_count: lane count
3653
+ *
3654
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
3655
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
3656
+ * convert the number of PBNs required for a given stream to the number of
3657
+ * timeslots this stream requires in each MTP.
3658
+ */
3659
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
21323660 {
2133
- switch (dp_link_bw) {
2134
- default:
2135
- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2136
- dp_link_bw, dp_link_count);
3661
+ if (link_rate == 0 || link_lane_count == 0)
3662
+ DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
3663
+ link_rate, link_lane_count);
3664
+
3665
+ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
3666
+ return link_rate * link_lane_count / 54000;
3667
+}
3668
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
3669
+
3670
+/**
3671
+ * drm_dp_read_mst_cap() - check whether or not a sink supports MST
3672
+ * @aux: The DP AUX channel to use
3673
+ * @dpcd: A cached copy of the DPCD capabilities for this sink
3674
+ *
3675
+ * Returns: %True if the sink supports MST, %false otherwise
3676
+ */
3677
+bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
3678
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
3679
+{
3680
+ u8 mstm_cap;
3681
+
3682
+ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
21373683 return false;
21383684
2139
- case DP_LINK_BW_1_62:
2140
- *out = 3 * dp_link_count;
2141
- break;
2142
- case DP_LINK_BW_2_7:
2143
- *out = 5 * dp_link_count;
2144
- break;
2145
- case DP_LINK_BW_5_4:
2146
- *out = 10 * dp_link_count;
2147
- break;
2148
- case DP_LINK_BW_8_1:
2149
- *out = 15 * dp_link_count;
2150
- break;
2151
- }
2152
- return true;
3685
+ if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
3686
+ return false;
3687
+
3688
+ return mstm_cap & DP_MST_CAP;
21533689 }
3690
+EXPORT_SYMBOL(drm_dp_read_mst_cap);
21543691
21553692 /**
21563693 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
....@@ -2173,6 +3710,8 @@
21733710 mgr->mst_state = mst_state;
21743711 /* set the device into MST mode */
21753712 if (mst_state) {
3713
+ struct drm_dp_payload reset_pay;
3714
+
21763715 WARN_ON(mgr->mst_primary);
21773716
21783717 /* get dpcd info */
....@@ -2182,9 +3721,9 @@
21823721 goto out_unlock;
21833722 }
21843723
2185
- if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
2186
- mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
2187
- &mgr->pbn_div)) {
3724
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
3725
+ mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3726
+ if (mgr->pbn_div == 0) {
21883727 ret = -EINVAL;
21893728 goto out_unlock;
21903729 }
....@@ -2199,20 +3738,18 @@
21993738
22003739 /* give this the main reference */
22013740 mgr->mst_primary = mstb;
2202
- kref_get(&mgr->mst_primary->kref);
3741
+ drm_dp_mst_topology_get_mstb(mgr->mst_primary);
22033742
22043743 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2205
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2206
- if (ret < 0) {
3744
+ DP_MST_EN |
3745
+ DP_UP_REQ_EN |
3746
+ DP_UPSTREAM_IS_SRC);
3747
+ if (ret < 0)
22073748 goto out_unlock;
2208
- }
22093749
2210
- {
2211
- struct drm_dp_payload reset_pay;
2212
- reset_pay.start_slot = 0;
2213
- reset_pay.num_slots = 0x3f;
2214
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2215
- }
3750
+ reset_pay.start_slot = 0;
3751
+ reset_pay.num_slots = 0x3f;
3752
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
22163753
22173754 queue_work(system_long_wq, &mgr->work);
22183755
....@@ -2231,17 +3768,31 @@
22313768 mgr->payload_mask = 0;
22323769 set_bit(0, &mgr->payload_mask);
22333770 mgr->vcpi_mask = 0;
3771
+ mgr->payload_id_table_cleared = false;
22343772 }
22353773
22363774 out_unlock:
22373775 mutex_unlock(&mgr->lock);
22383776 mutex_unlock(&mgr->payload_lock);
22393777 if (mstb)
2240
- drm_dp_put_mst_branch_device(mstb);
3778
+ drm_dp_mst_topology_put_mstb(mstb);
22413779 return ret;
22423780
22433781 }
22443782 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
3783
+
3784
+static void
3785
+drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3786
+{
3787
+ struct drm_dp_mst_port *port;
3788
+
3789
+ /* The link address will need to be re-sent on resume */
3790
+ mstb->link_address_sent = false;
3791
+
3792
+ list_for_each_entry(port, &mstb->ports, next)
3793
+ if (port->mstb)
3794
+ drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3795
+}
22453796
22463797 /**
22473798 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
....@@ -2256,91 +3807,154 @@
22563807 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
22573808 DP_MST_EN | DP_UPSTREAM_IS_SRC);
22583809 mutex_unlock(&mgr->lock);
3810
+ flush_work(&mgr->up_req_work);
22593811 flush_work(&mgr->work);
2260
- flush_work(&mgr->destroy_connector_work);
3812
+ flush_work(&mgr->delayed_destroy_work);
3813
+
3814
+ mutex_lock(&mgr->lock);
3815
+ if (mgr->mst_state && mgr->mst_primary)
3816
+ drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3817
+ mutex_unlock(&mgr->lock);
22613818 }
22623819 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
22633820
22643821 /**
22653822 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
22663823 * @mgr: manager to resume
3824
+ * @sync: whether or not to perform topology reprobing synchronously
22673825 *
22683826 * This will fetch DPCD and see if the device is still there,
22693827 * if it is, it will rewrite the MSTM control bits, and return.
22703828 *
2271
- * if the device fails this returns -1, and the driver should do
3829
+ * If the device fails this returns -1, and the driver should do
22723830 * a full MST reprobe, in case we were undocked.
3831
+ *
3832
+ * During system resume (where it is assumed that the driver will be calling
3833
+ * drm_atomic_helper_resume()) this function should be called beforehand with
3834
+ * @sync set to true. In contexts like runtime resume where the driver is not
3835
+ * expected to be calling drm_atomic_helper_resume(), this function should be
3836
+ * called with @sync set to false in order to avoid deadlocking.
3837
+ *
3838
+ * Returns: -1 if the MST topology was removed while we were suspended, 0
3839
+ * otherwise.
22733840 */
2274
-int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
3841
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3842
+ bool sync)
22753843 {
2276
- int ret = 0;
3844
+ int ret;
3845
+ u8 guid[16];
22773846
22783847 mutex_lock(&mgr->lock);
3848
+ if (!mgr->mst_primary)
3849
+ goto out_fail;
22793850
2280
- if (mgr->mst_primary) {
2281
- int sret;
2282
- u8 guid[16];
3851
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3852
+ DP_RECEIVER_CAP_SIZE);
3853
+ if (ret != DP_RECEIVER_CAP_SIZE) {
3854
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3855
+ goto out_fail;
3856
+ }
22833857
2284
- sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2285
- if (sret != DP_RECEIVER_CAP_SIZE) {
2286
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2287
- ret = -1;
2288
- goto out_unlock;
2289
- }
3858
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3859
+ DP_MST_EN |
3860
+ DP_UP_REQ_EN |
3861
+ DP_UPSTREAM_IS_SRC);
3862
+ if (ret < 0) {
3863
+ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3864
+ goto out_fail;
3865
+ }
22903866
2291
- ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2292
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2293
- if (ret < 0) {
2294
- DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2295
- ret = -1;
2296
- goto out_unlock;
2297
- }
3867
+ /* Some hubs forget their guids after they resume */
3868
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3869
+ if (ret != 16) {
3870
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3871
+ goto out_fail;
3872
+ }
22983873
2299
- /* Some hubs forget their guids after they resume */
2300
- sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2301
- if (sret != 16) {
2302
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2303
- ret = -1;
2304
- goto out_unlock;
2305
- }
2306
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3874
+ ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3875
+ if (ret) {
3876
+ DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
3877
+ goto out_fail;
3878
+ }
23073879
2308
- ret = 0;
2309
- } else
2310
- ret = -1;
2311
-
2312
-out_unlock:
3880
+ /*
3881
+ * For the final step of resuming the topology, we need to bring the
3882
+ * state of our in-memory topology back into sync with reality. So,
3883
+ * restart the probing process as if we're probing a new hub
3884
+ */
3885
+ queue_work(system_long_wq, &mgr->work);
23133886 mutex_unlock(&mgr->lock);
2314
- return ret;
3887
+
3888
+ if (sync) {
3889
+ DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3890
+ flush_work(&mgr->work);
3891
+ }
3892
+
3893
+ return 0;
3894
+
3895
+out_fail:
3896
+ mutex_unlock(&mgr->lock);
3897
+ return -1;
23153898 }
23163899 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
23173900
2318
-static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3901
+static bool
3902
+drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
3903
+ struct drm_dp_mst_branch **mstb)
23193904 {
23203905 int len;
23213906 u8 replyblock[32];
2322
- int replylen, origlen, curreply;
3907
+ int replylen, curreply;
23233908 int ret;
2324
- struct drm_dp_sideband_msg_rx *msg;
2325
- int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2326
- msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3909
+ u8 hdrlen;
3910
+ struct drm_dp_sideband_msg_hdr hdr;
3911
+ struct drm_dp_sideband_msg_rx *msg =
3912
+ up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3913
+ int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
3914
+ DP_SIDEBAND_MSG_DOWN_REP_BASE;
3915
+
3916
+ if (!up)
3917
+ *mstb = NULL;
23273918
23283919 len = min(mgr->max_dpcd_transaction_bytes, 16);
2329
- ret = drm_dp_dpcd_read(mgr->aux, basereg,
2330
- replyblock, len);
3920
+ ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
23313921 if (ret != len) {
23323922 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
23333923 return false;
23343924 }
2335
- ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3925
+
3926
+ ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
3927
+ if (ret == false) {
3928
+ print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
3929
+ 1, replyblock, len, false);
3930
+ DRM_DEBUG_KMS("ERROR: failed header\n");
3931
+ return false;
3932
+ }
3933
+
3934
+ if (!up) {
3935
+ /* Caller is responsible for giving back this reference */
3936
+ *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
3937
+ if (!*mstb) {
3938
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3939
+ hdr.lct);
3940
+ return false;
3941
+ }
3942
+ }
3943
+
3944
+ if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
3945
+ DRM_DEBUG_KMS("sideband msg set header failed %d\n",
3946
+ replyblock[0]);
3947
+ return false;
3948
+ }
3949
+
3950
+ replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
3951
+ ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
23363952 if (!ret) {
23373953 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
23383954 return false;
23393955 }
2340
- replylen = msg->curchunk_len + msg->curchunk_hdrlen;
23413956
2342
- origlen = replylen;
2343
- replylen -= len;
3957
+ replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
23443958 curreply = len;
23453959 while (replylen > 0) {
23463960 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
....@@ -2352,7 +3966,7 @@
23523966 return false;
23533967 }
23543968
2355
- ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3969
+ ret = drm_dp_sideband_append_payload(msg, replyblock, len);
23563970 if (!ret) {
23573971 DRM_DEBUG_KMS("failed to build sideband msg\n");
23583972 return false;
....@@ -2366,131 +3980,194 @@
23663980
23673981 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
23683982 {
2369
- int ret = 0;
3983
+ struct drm_dp_sideband_msg_tx *txmsg;
3984
+ struct drm_dp_mst_branch *mstb = NULL;
3985
+ struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
23703986
2371
- if (!drm_dp_get_one_sb_msg(mgr, false)) {
2372
- memset(&mgr->down_rep_recv, 0,
2373
- sizeof(struct drm_dp_sideband_msg_rx));
2374
- return 0;
3987
+ if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3988
+ goto out;
3989
+
3990
+ /* Multi-packet message transmission, don't clear the reply */
3991
+ if (!msg->have_eomt)
3992
+ goto out;
3993
+
3994
+ /* find the message */
3995
+ mutex_lock(&mgr->qlock);
3996
+ txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3997
+ struct drm_dp_sideband_msg_tx, next);
3998
+ mutex_unlock(&mgr->qlock);
3999
+
4000
+ /* Were we actually expecting a response, and from this mstb? */
4001
+ if (!txmsg || txmsg->dst != mstb) {
4002
+ struct drm_dp_sideband_msg_hdr *hdr;
4003
+
4004
+ hdr = &msg->initial_hdr;
4005
+ DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
4006
+ mstb, hdr->seqno, hdr->lct, hdr->rad[0],
4007
+ msg->msg[0]);
4008
+ goto out_clear_reply;
23754009 }
23764010
2377
- if (mgr->down_rep_recv.have_eomt) {
2378
- struct drm_dp_sideband_msg_tx *txmsg;
2379
- struct drm_dp_mst_branch *mstb;
2380
- int slot = -1;
2381
- mstb = drm_dp_get_mst_branch_device(mgr,
2382
- mgr->down_rep_recv.initial_hdr.lct,
2383
- mgr->down_rep_recv.initial_hdr.rad);
4011
+ drm_dp_sideband_parse_reply(msg, &txmsg->reply);
23844012
2385
- if (!mstb) {
2386
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2387
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2388
- return 0;
2389
- }
2390
-
2391
- /* find the message */
2392
- slot = mgr->down_rep_recv.initial_hdr.seqno;
2393
- mutex_lock(&mgr->qlock);
2394
- txmsg = mstb->tx_slots[slot];
2395
- /* remove from slots */
2396
- mutex_unlock(&mgr->qlock);
2397
-
2398
- if (!txmsg) {
2399
- DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2400
- mstb,
2401
- mgr->down_rep_recv.initial_hdr.seqno,
2402
- mgr->down_rep_recv.initial_hdr.lct,
2403
- mgr->down_rep_recv.initial_hdr.rad[0],
2404
- mgr->down_rep_recv.msg[0]);
2405
- drm_dp_put_mst_branch_device(mstb);
2406
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2407
- return 0;
2408
- }
2409
-
2410
- drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2411
- if (txmsg->reply.reply_type == 1) {
2412
- DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
2413
- }
2414
-
2415
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2416
- drm_dp_put_mst_branch_device(mstb);
2417
-
2418
- mutex_lock(&mgr->qlock);
2419
- txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2420
- mstb->tx_slots[slot] = NULL;
2421
- mutex_unlock(&mgr->qlock);
2422
-
2423
- wake_up_all(&mgr->tx_waitq);
4013
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4014
+ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
4015
+ txmsg->reply.req_type,
4016
+ drm_dp_mst_req_type_str(txmsg->reply.req_type),
4017
+ txmsg->reply.u.nak.reason,
4018
+ drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
4019
+ txmsg->reply.u.nak.nak_data);
24244020 }
2425
- return ret;
4021
+
4022
+ memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
4023
+ drm_dp_mst_topology_put_mstb(mstb);
4024
+
4025
+ mutex_lock(&mgr->qlock);
4026
+ txmsg->state = DRM_DP_SIDEBAND_TX_RX;
4027
+ list_del(&txmsg->next);
4028
+ mutex_unlock(&mgr->qlock);
4029
+
4030
+ wake_up_all(&mgr->tx_waitq);
4031
+
4032
+ return 0;
4033
+
4034
+out_clear_reply:
4035
+ memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
4036
+out:
4037
+ if (mstb)
4038
+ drm_dp_mst_topology_put_mstb(mstb);
4039
+
4040
+ return 0;
4041
+}
4042
+
4043
+static inline bool
4044
+drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
4045
+ struct drm_dp_pending_up_req *up_req)
4046
+{
4047
+ struct drm_dp_mst_branch *mstb = NULL;
4048
+ struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
4049
+ struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
4050
+ bool hotplug = false;
4051
+
4052
+ if (hdr->broadcast) {
4053
+ const u8 *guid = NULL;
4054
+
4055
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
4056
+ guid = msg->u.conn_stat.guid;
4057
+ else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
4058
+ guid = msg->u.resource_stat.guid;
4059
+
4060
+ if (guid)
4061
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
4062
+ } else {
4063
+ mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
4064
+ }
4065
+
4066
+ if (!mstb) {
4067
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
4068
+ hdr->lct);
4069
+ return false;
4070
+ }
4071
+
4072
+ /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
4073
+ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
4074
+ drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
4075
+ hotplug = true;
4076
+ }
4077
+
4078
+ drm_dp_mst_topology_put_mstb(mstb);
4079
+ return hotplug;
4080
+}
4081
+
4082
+static void drm_dp_mst_up_req_work(struct work_struct *work)
4083
+{
4084
+ struct drm_dp_mst_topology_mgr *mgr =
4085
+ container_of(work, struct drm_dp_mst_topology_mgr,
4086
+ up_req_work);
4087
+ struct drm_dp_pending_up_req *up_req;
4088
+ bool send_hotplug = false;
4089
+
4090
+ mutex_lock(&mgr->probe_lock);
4091
+ while (true) {
4092
+ mutex_lock(&mgr->up_req_lock);
4093
+ up_req = list_first_entry_or_null(&mgr->up_req_list,
4094
+ struct drm_dp_pending_up_req,
4095
+ next);
4096
+ if (up_req)
4097
+ list_del(&up_req->next);
4098
+ mutex_unlock(&mgr->up_req_lock);
4099
+
4100
+ if (!up_req)
4101
+ break;
4102
+
4103
+ send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
4104
+ kfree(up_req);
4105
+ }
4106
+ mutex_unlock(&mgr->probe_lock);
4107
+
4108
+ if (send_hotplug)
4109
+ drm_kms_helper_hotplug_event(mgr->dev);
24264110 }
24274111
24284112 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
24294113 {
2430
- int ret = 0;
4114
+ struct drm_dp_pending_up_req *up_req;
24314115
2432
- if (!drm_dp_get_one_sb_msg(mgr, true)) {
2433
- memset(&mgr->up_req_recv, 0,
2434
- sizeof(struct drm_dp_sideband_msg_rx));
4116
+ if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
4117
+ goto out;
4118
+
4119
+ if (!mgr->up_req_recv.have_eomt)
24354120 return 0;
4121
+
4122
+ up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
4123
+ if (!up_req) {
4124
+ DRM_ERROR("Not enough memory to process MST up req\n");
4125
+ return -ENOMEM;
4126
+ }
4127
+ INIT_LIST_HEAD(&up_req->next);
4128
+
4129
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
4130
+
4131
+ if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
4132
+ up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
4133
+ DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
4134
+ up_req->msg.req_type);
4135
+ kfree(up_req);
4136
+ goto out;
24364137 }
24374138
2438
- if (mgr->up_req_recv.have_eomt) {
2439
- struct drm_dp_sideband_msg_req_body msg;
2440
- struct drm_dp_mst_branch *mstb = NULL;
2441
- bool seqno;
4139
+ drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
4140
+ false);
24424141
2443
- if (!mgr->up_req_recv.initial_hdr.broadcast) {
2444
- mstb = drm_dp_get_mst_branch_device(mgr,
2445
- mgr->up_req_recv.initial_hdr.lct,
2446
- mgr->up_req_recv.initial_hdr.rad);
2447
- if (!mstb) {
2448
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2449
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2450
- return 0;
2451
- }
2452
- }
4142
+ if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
4143
+ const struct drm_dp_connection_status_notify *conn_stat =
4144
+ &up_req->msg.u.conn_stat;
24534145
2454
- seqno = mgr->up_req_recv.initial_hdr.seqno;
2455
- drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
4146
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
4147
+ conn_stat->port_number,
4148
+ conn_stat->legacy_device_plug_status,
4149
+ conn_stat->displayport_device_plug_status,
4150
+ conn_stat->message_capability_status,
4151
+ conn_stat->input_port,
4152
+ conn_stat->peer_device_type);
4153
+ } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
4154
+ const struct drm_dp_resource_status_notify *res_stat =
4155
+ &up_req->msg.u.resource_stat;
24564156
2457
- if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2458
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2459
-
2460
- if (!mstb)
2461
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
2462
-
2463
- if (!mstb) {
2464
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2465
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2466
- return 0;
2467
- }
2468
-
2469
- drm_dp_update_port(mstb, &msg.u.conn_stat);
2470
-
2471
- DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2472
- (*mgr->cbs->hotplug)(mgr);
2473
-
2474
- } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2475
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2476
- if (!mstb)
2477
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
2478
-
2479
- if (!mstb) {
2480
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2481
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2482
- return 0;
2483
- }
2484
-
2485
- DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2486
- }
2487
-
2488
- if (mstb)
2489
- drm_dp_put_mst_branch_device(mstb);
2490
-
2491
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
4157
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
4158
+ res_stat->port_number,
4159
+ res_stat->available_pbn);
24924160 }
2493
- return ret;
4161
+
4162
+ up_req->hdr = mgr->up_req_recv.initial_hdr;
4163
+ mutex_lock(&mgr->up_req_lock);
4164
+ list_add_tail(&up_req->next, &mgr->up_req_list);
4165
+ mutex_unlock(&mgr->up_req_lock);
4166
+ queue_work(system_long_wq, &mgr->up_req_work);
4167
+
4168
+out:
4169
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
4170
+ return 0;
24944171 }
24954172
24964173 /**
....@@ -2534,32 +4211,44 @@
25344211 /**
25354212 * drm_dp_mst_detect_port() - get connection status for an MST port
25364213 * @connector: DRM connector for this port
4214
+ * @ctx: The acquisition context to use for grabbing locks
25374215 * @mgr: manager for this port
2538
- * @port: unverified pointer to a port
4216
+ * @port: pointer to a port
25394217 *
2540
- * This returns the current connection state for a port. It validates the
2541
- * port pointer still exists so the caller doesn't require a reference
4218
+ * This returns the current connection state for a port.
25424219 */
2543
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
2544
- struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4220
+int
4221
+drm_dp_mst_detect_port(struct drm_connector *connector,
4222
+ struct drm_modeset_acquire_ctx *ctx,
4223
+ struct drm_dp_mst_topology_mgr *mgr,
4224
+ struct drm_dp_mst_port *port)
25454225 {
2546
- enum drm_connector_status status = connector_status_disconnected;
4226
+ int ret;
25474227
2548
- /* we need to search for the port in the mgr in case its gone */
2549
- port = drm_dp_get_validated_port_ref(mgr, port);
4228
+ /* we need to search for the port in the mgr in case it's gone */
4229
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
25504230 if (!port)
25514231 return connector_status_disconnected;
4232
+
4233
+ ret = drm_modeset_lock(&mgr->base.lock, ctx);
4234
+ if (ret)
4235
+ goto out;
4236
+
4237
+ ret = connector_status_disconnected;
25524238
25534239 if (!port->ddps)
25544240 goto out;
25554241
25564242 switch (port->pdt) {
25574243 case DP_PEER_DEVICE_NONE:
4244
+ break;
25584245 case DP_PEER_DEVICE_MST_BRANCHING:
4246
+ if (!port->mcs)
4247
+ ret = connector_status_connected;
25594248 break;
25604249
25614250 case DP_PEER_DEVICE_SST_SINK:
2562
- status = connector_status_connected;
4251
+ ret = connector_status_connected;
25634252 /* for logical ports - cache the EDID */
25644253 if (port->port_num >= 8 && !port->cached_edid) {
25654254 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
....@@ -2567,49 +4256,14 @@
25674256 break;
25684257 case DP_PEER_DEVICE_DP_LEGACY_CONV:
25694258 if (port->ldps)
2570
- status = connector_status_connected;
4259
+ ret = connector_status_connected;
25714260 break;
25724261 }
25734262 out:
2574
- drm_dp_put_port(port);
2575
- return status;
4263
+ drm_dp_mst_topology_put_port(port);
4264
+ return ret;
25764265 }
25774266 EXPORT_SYMBOL(drm_dp_mst_detect_port);
2578
-
2579
-/**
2580
- * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
2581
- * @mgr: manager for this port
2582
- * @port: unverified pointer to a port.
2583
- *
2584
- * This returns whether the port supports audio or not.
2585
- */
2586
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
2587
- struct drm_dp_mst_port *port)
2588
-{
2589
- bool ret = false;
2590
-
2591
- port = drm_dp_get_validated_port_ref(mgr, port);
2592
- if (!port)
2593
- return ret;
2594
- ret = port->has_audio;
2595
- drm_dp_put_port(port);
2596
- return ret;
2597
-}
2598
-EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
2599
-
2600
-bool drm_dp_mst_has_fec(struct drm_dp_mst_topology_mgr *mgr,
2601
- struct drm_dp_mst_port *port)
2602
-{
2603
- bool ret = false;
2604
-
2605
- port = drm_dp_get_validated_port_ref(mgr, port);
2606
- if (!port)
2607
- return ret;
2608
- ret = port->fec_capable;
2609
- drm_dp_put_port(port);
2610
- return ret;
2611
-}
2612
-EXPORT_SYMBOL_GPL(drm_dp_mst_has_fec);
26134267
26144268 /**
26154269 * drm_dp_mst_get_edid() - get EDID for an MST port
....@@ -2625,8 +4279,8 @@
26254279 {
26264280 struct edid *edid = NULL;
26274281
2628
- /* we need to search for the port in the mgr in case its gone */
2629
- port = drm_dp_get_validated_port_ref(mgr, port);
4282
+ /* we need to search for the port in the mgr in case it's gone */
4283
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
26304284 if (!port)
26314285 return NULL;
26324286
....@@ -2634,18 +4288,24 @@
26344288 edid = drm_edid_duplicate(port->cached_edid);
26354289 else {
26364290 edid = drm_get_edid(connector, &port->aux.ddc);
2637
- drm_connector_set_tile_property(connector);
26384291 }
26394292 port->has_audio = drm_detect_monitor_audio(edid);
2640
- drm_dp_put_port(port);
4293
+ drm_dp_mst_topology_put_port(port);
26414294 return edid;
26424295 }
26434296 EXPORT_SYMBOL(drm_dp_mst_get_edid);
26444297
26454298 /**
2646
- * drm_dp_find_vcpi_slots() - find slots for this PBN value
4299
+ * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
26474300 * @mgr: manager to use
26484301 * @pbn: payload bandwidth to convert into slots.
4302
+ *
4303
+ * Calculate the number of VCPI slots that will be required for the given PBN
4304
+ * value. This function is deprecated, and should not be used in atomic
4305
+ * drivers.
4306
+ *
4307
+ * RETURNS:
4308
+ * The total slots required for this port, or error.
26494309 */
26504310 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
26514311 int pbn)
....@@ -2681,42 +4341,100 @@
26814341 }
26824342
26834343 /**
2684
- * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
4344
+ * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
26854345 * @state: global atomic state
26864346 * @mgr: MST topology manager for the port
26874347 * @port: port to find vcpi slots for
26884348 * @pbn: bandwidth required for the mode in PBN
4349
+ * @pbn_div: divider for DSC mode that takes FEC into account
26894350 *
2690
- * RETURNS:
2691
- * Total slots in the atomic state assigned for this port or error
4351
+ * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4352
+ * may have had. Any atomic drivers which support MST must call this function
4353
+ * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4354
+ * current VCPI allocation for the new state, but only when
4355
+ * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4356
+ * to ensure compatibility with userspace applications that still use the
4357
+ * legacy modesetting UAPI.
4358
+ *
4359
+ * Allocations set by this function are not checked against the bandwidth
4360
+ * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4361
+ *
4362
+ * Additionally, it is OK to call this function multiple times on the same
4363
+ * @port as needed. It is not OK however, to call this function and
4364
+ * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4365
+ *
4366
+ * See also:
4367
+ * drm_dp_atomic_release_vcpi_slots()
4368
+ * drm_dp_mst_atomic_check()
4369
+ *
4370
+ * Returns:
4371
+ * Total slots in the atomic state assigned for this port, or a negative error
4372
+ * code if the port no longer exists
26924373 */
26934374 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
26944375 struct drm_dp_mst_topology_mgr *mgr,
2695
- struct drm_dp_mst_port *port, int pbn)
4376
+ struct drm_dp_mst_port *port, int pbn,
4377
+ int pbn_div)
26964378 {
26974379 struct drm_dp_mst_topology_state *topology_state;
2698
- int req_slots;
4380
+ struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4381
+ int prev_slots, prev_bw, req_slots;
26994382
27004383 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
27014384 if (IS_ERR(topology_state))
27024385 return PTR_ERR(topology_state);
27034386
2704
- port = drm_dp_get_validated_port_ref(mgr, port);
2705
- if (port == NULL)
2706
- return -EINVAL;
2707
- req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2708
- DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
2709
- req_slots, topology_state->avail_slots);
4387
+ /* Find the current allocation for this port, if any */
4388
+ list_for_each_entry(pos, &topology_state->vcpis, next) {
4389
+ if (pos->port == port) {
4390
+ vcpi = pos;
4391
+ prev_slots = vcpi->vcpi;
4392
+ prev_bw = vcpi->pbn;
27104393
2711
- if (req_slots > topology_state->avail_slots) {
2712
- drm_dp_put_port(port);
2713
- return -ENOSPC;
4394
+ /*
4395
+ * This should never happen, unless the driver tries
4396
+ * releasing and allocating the same VCPI allocation,
4397
+ * which is an error
4398
+ */
4399
+ if (WARN_ON(!prev_slots)) {
4400
+ DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4401
+ port);
4402
+ return -EINVAL;
4403
+ }
4404
+
4405
+ break;
4406
+ }
4407
+ }
4408
+ if (!vcpi) {
4409
+ prev_slots = 0;
4410
+ prev_bw = 0;
27144411 }
27154412
2716
- topology_state->avail_slots -= req_slots;
2717
- DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots);
4413
+ if (pbn_div <= 0)
4414
+ pbn_div = mgr->pbn_div;
27184415
2719
- drm_dp_put_port(port);
4416
+ req_slots = DIV_ROUND_UP(pbn, pbn_div);
4417
+
4418
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4419
+ port->connector->base.id, port->connector->name,
4420
+ port, prev_slots, req_slots);
4421
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4422
+ port->connector->base.id, port->connector->name,
4423
+ port, prev_bw, pbn);
4424
+
4425
+ /* Add the new allocation to the state */
4426
+ if (!vcpi) {
4427
+ vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4428
+ if (!vcpi)
4429
+ return -ENOMEM;
4430
+
4431
+ drm_dp_mst_get_port_malloc(port);
4432
+ vcpi->port = port;
4433
+ list_add(&vcpi->next, &topology_state->vcpis);
4434
+ }
4435
+ vcpi->vcpi = req_slots;
4436
+ vcpi->pbn = pbn;
4437
+
27204438 return req_slots;
27214439 }
27224440 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
....@@ -2725,31 +4443,58 @@
27254443 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
27264444 * @state: global atomic state
27274445 * @mgr: MST topology manager for the port
2728
- * @slots: number of vcpi slots to release
4446
+ * @port: The port to release the VCPI slots from
27294447 *
2730
- * RETURNS:
2731
- * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
2732
- * negative error code
4448
+ * Releases any VCPI slots that have been allocated to a port in the atomic
4449
+ * state. Any atomic drivers which support MST must call this function in
4450
+ * their &drm_connector_helper_funcs.atomic_check() callback when the
4451
+ * connector will no longer have VCPI allocated (e.g. because its CRTC was
4452
+ * removed) when it had VCPI allocated in the previous atomic state.
4453
+ *
4454
+ * It is OK to call this even if @port has been removed from the system.
4455
+ * Additionally, it is OK to call this function multiple times on the same
4456
+ * @port as needed. It is not OK however, to call this function and
4457
+ * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4458
+ * phase.
4459
+ *
4460
+ * See also:
4461
+ * drm_dp_atomic_find_vcpi_slots()
4462
+ * drm_dp_mst_atomic_check()
4463
+ *
4464
+ * Returns:
4465
+ * 0 if all slots for this port were added back to
4466
+ * &drm_dp_mst_topology_state.avail_slots or negative error code
27334467 */
27344468 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
27354469 struct drm_dp_mst_topology_mgr *mgr,
2736
- int slots)
4470
+ struct drm_dp_mst_port *port)
27374471 {
27384472 struct drm_dp_mst_topology_state *topology_state;
4473
+ struct drm_dp_vcpi_allocation *pos;
4474
+ bool found = false;
27394475
27404476 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
27414477 if (IS_ERR(topology_state))
27424478 return PTR_ERR(topology_state);
27434479
2744
- /* We cannot rely on port->vcpi.num_slots to update
2745
- * topology_state->avail_slots as the port may not exist if the parent
2746
- * branch device was unplugged. This should be fixed by tracking
2747
- * per-port slot allocation in drm_dp_mst_topology_state instead of
2748
- * depending on the caller to tell us how many slots to release.
2749
- */
2750
- topology_state->avail_slots += slots;
2751
- DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
2752
- slots, topology_state->avail_slots);
4480
+ list_for_each_entry(pos, &topology_state->vcpis, next) {
4481
+ if (pos->port == port) {
4482
+ found = true;
4483
+ break;
4484
+ }
4485
+ }
4486
+ if (WARN_ON(!found)) {
4487
+ DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4488
+ port, &topology_state->base);
4489
+ return -EINVAL;
4490
+ }
4491
+
4492
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4493
+ if (pos->vcpi) {
4494
+ drm_dp_mst_put_port_malloc(port);
4495
+ pos->vcpi = 0;
4496
+ pos->pbn = 0;
4497
+ }
27534498
27544499 return 0;
27554500 }
....@@ -2770,14 +4515,15 @@
27704515 if (slots < 0)
27714516 return false;
27724517
2773
- port = drm_dp_get_validated_port_ref(mgr, port);
4518
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
27744519 if (!port)
27754520 return false;
27764521
27774522 if (port->vcpi.vcpi > 0) {
2778
- DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
4523
+ DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4524
+ port->vcpi.vcpi, port->vcpi.pbn, pbn);
27794525 if (pbn == port->vcpi.pbn) {
2780
- drm_dp_put_port(port);
4526
+ drm_dp_mst_topology_put_port(port);
27814527 return true;
27824528 }
27834529 }
....@@ -2785,14 +4531,16 @@
27854531 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
27864532 if (ret) {
27874533 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
2788
- DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
2789
- drm_dp_put_port(port);
4534
+ DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4535
+ drm_dp_mst_topology_put_port(port);
27904536 goto out;
27914537 }
27924538 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
2793
- pbn, port->vcpi.num_slots);
4539
+ pbn, port->vcpi.num_slots);
27944540
2795
- drm_dp_put_port(port);
4541
+ /* Keep port allocated until its payload has been removed */
4542
+ drm_dp_mst_get_port_malloc(port);
4543
+ drm_dp_mst_topology_put_port(port);
27964544 return true;
27974545 out:
27984546 return false;
....@@ -2802,12 +4550,13 @@
28024550 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
28034551 {
28044552 int slots = 0;
2805
- port = drm_dp_get_validated_port_ref(mgr, port);
4553
+
4554
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
28064555 if (!port)
28074556 return slots;
28084557
28094558 slots = port->vcpi.num_slots;
2810
- drm_dp_put_port(port);
4559
+ drm_dp_mst_topology_put_port(port);
28114560 return slots;
28124561 }
28134562 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
....@@ -2821,23 +4570,36 @@
28214570 */
28224571 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
28234572 {
2824
- port = drm_dp_get_validated_port_ref(mgr, port);
2825
- if (!port)
2826
- return;
4573
+ /*
4574
+ * A port with VCPI will remain allocated until its VCPI is
4575
+ * released, no verified ref needed
4576
+ */
4577
+
28274578 port->vcpi.num_slots = 0;
2828
- drm_dp_put_port(port);
28294579 }
28304580 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
28314581
28324582 /**
28334583 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
28344584 * @mgr: manager for this port
2835
- * @port: unverified port to deallocate vcpi for
4585
+ * @port: port to deallocate vcpi for
4586
+ *
4587
+ * This can be called unconditionally, regardless of whether
4588
+ * drm_dp_mst_allocate_vcpi() succeeded or not.
28364589 */
2837
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4590
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4591
+ struct drm_dp_mst_port *port)
28384592 {
2839
- port = drm_dp_get_validated_port_ref(mgr, port);
2840
- if (!port)
4593
+ bool skip;
4594
+
4595
+ if (!port->vcpi.vcpi)
4596
+ return;
4597
+
4598
+ mutex_lock(&mgr->lock);
4599
+ skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary);
4600
+ mutex_unlock(&mgr->lock);
4601
+
4602
+ if (skip)
28414603 return;
28424604
28434605 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
....@@ -2845,7 +4607,7 @@
28454607 port->vcpi.pbn = 0;
28464608 port->vcpi.aligned_pbn = 0;
28474609 port->vcpi.vcpi = 0;
2848
- drm_dp_put_port(port);
4610
+ drm_dp_mst_put_port_malloc(port);
28494611 }
28504612 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
28514613
....@@ -2904,10 +4666,15 @@
29044666 }
29054667
29064668 /**
2907
- * drm_dp_check_act_status() - Check ACT handled status.
4669
+ * drm_dp_check_act_status() - Polls for ACT handled status.
29084670 * @mgr: manager to use
29094671 *
2910
- * Check the payload status bits in the DPCD for ACT handled completion.
4672
+ * Tries waiting for the MST hub to finish updating it's payload table by
4673
+ * polling for the ACT handled bit for up to 3 seconds (yes-some hubs really
4674
+ * take that long).
4675
+ *
4676
+ * Returns:
4677
+ * 0 if the ACT was handled in time, negative error code on failure.
29114678 */
29124679 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
29134680 {
....@@ -2924,10 +4691,14 @@
29244691 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
29254692 200, timeout_ms * USEC_PER_MSEC);
29264693 if (ret < 0 && status >= 0) {
2927
- DRM_DEBUG_KMS("Failed to get ACT after %dms, last status: %02x\n",
2928
- timeout_ms, status);
4694
+ DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
4695
+ timeout_ms, status);
29294696 return -EINVAL;
29304697 } else if (status < 0) {
4698
+ /*
4699
+ * Failure here isn't unexpected - the hub may have
4700
+ * just been unplugged
4701
+ */
29314702 DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
29324703 status);
29334704 return status;
....@@ -2941,18 +4712,12 @@
29414712 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
29424713 * @clock: dot clock for the mode
29434714 * @bpp: bpp for the mode.
4715
+ * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
29444716 *
29454717 * This uses the formula in the spec to calculate the PBN value for a mode.
29464718 */
2947
-int drm_dp_calc_pbn_mode(int clock, int bpp)
4719
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
29484720 {
2949
- u64 kbps;
2950
- s64 peak_kbps;
2951
- u32 numerator;
2952
- u32 denominator;
2953
-
2954
- kbps = clock * bpp;
2955
-
29564721 /*
29574722 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
29584723 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
....@@ -2962,41 +4727,20 @@
29624727 * peak_kbps *= (1006/1000)
29634728 * peak_kbps *= (64/54)
29644729 * peak_kbps *= 8 convert to bytes
4730
+ *
4731
+ * If the bpp is in units of 1/16, further divide by 16. Put this
4732
+ * factor in the numerator rather than the denominator to avoid
4733
+ * integer overflow
29654734 */
29664735
2967
- numerator = 64 * 1006;
2968
- denominator = 54 * 8 * 1000 * 1000;
4736
+ if (dsc)
4737
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4738
+ 8 * 54 * 1000 * 1000);
29694739
2970
- kbps *= numerator;
2971
- peak_kbps = drm_fixp_from_fraction(kbps, denominator);
2972
-
2973
- return drm_fixp2int_ceil(peak_kbps);
4740
+ return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4741
+ 8 * 54 * 1000 * 1000);
29744742 }
29754743 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
2976
-
2977
-static int test_calc_pbn_mode(void)
2978
-{
2979
- int ret;
2980
- ret = drm_dp_calc_pbn_mode(154000, 30);
2981
- if (ret != 689) {
2982
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2983
- 154000, 30, 689, ret);
2984
- return -EINVAL;
2985
- }
2986
- ret = drm_dp_calc_pbn_mode(234000, 30);
2987
- if (ret != 1047) {
2988
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2989
- 234000, 30, 1047, ret);
2990
- return -EINVAL;
2991
- }
2992
- ret = drm_dp_calc_pbn_mode(297000, 24);
2993
- if (ret != 1063) {
2994
- DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2995
- 297000, 24, 1063, ret);
2996
- return -EINVAL;
2997
- }
2998
- return 0;
2999
-}
30004744
30014745 /* we want to kick the TX after we've ack the up/down IRQs. */
30024746 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
....@@ -3048,6 +4792,7 @@
30484792
30494793 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
30504794 drm_edid_get_monitor_name(mst_edid, name, namelen);
4795
+ kfree(mst_edid);
30514796 }
30524797
30534798 /**
....@@ -3104,15 +4849,34 @@
31044849 int ret;
31054850
31064851 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
4852
+ if (ret) {
4853
+ seq_printf(m, "dpcd read failed\n");
4854
+ goto out;
4855
+ }
31074856 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4857
+
31084858 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4859
+ if (ret != 2) {
4860
+ seq_printf(m, "faux/mst read failed\n");
4861
+ goto out;
4862
+ }
31094863 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4864
+
31104865 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4866
+ if (ret != 1) {
4867
+ seq_printf(m, "mst ctrl read failed\n");
4868
+ goto out;
4869
+ }
31114870 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
31124871
31134872 /* dump the standard OUI branch header */
31144873 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4874
+ if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
4875
+ seq_printf(m, "branch oui read failed\n");
4876
+ goto out;
4877
+ }
31154878 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4879
+
31164880 for (i = 0x3; i < 0x8 && buf[i]; i++)
31174881 seq_printf(m, "%c", buf[i]);
31184882 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
....@@ -3121,6 +4885,7 @@
31214885 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
31224886 }
31234887
4888
+out:
31244889 mutex_unlock(&mgr->lock);
31254890
31264891 }
....@@ -3136,66 +4901,147 @@
31364901 mutex_unlock(&mgr->qlock);
31374902 }
31384903
3139
-static void drm_dp_free_mst_port(struct kref *kref)
4904
+static inline void
4905
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
31404906 {
3141
- struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
3142
- kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
3143
- kfree(port);
4907
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4908
+
4909
+ if (port->connector) {
4910
+ drm_connector_unregister(port->connector);
4911
+ drm_connector_put(port->connector);
4912
+ }
4913
+
4914
+ drm_dp_mst_put_port_malloc(port);
31444915 }
31454916
3146
-static void drm_dp_destroy_connector_work(struct work_struct *work)
4917
+static inline void
4918
+drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
31474919 {
3148
- struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
3149
- struct drm_dp_mst_port *port;
3150
- bool send_hotplug = false;
4920
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4921
+ struct drm_dp_mst_port *port, *port_tmp;
4922
+ struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
4923
+ bool wake_tx = false;
4924
+
4925
+ mutex_lock(&mgr->lock);
4926
+ list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
4927
+ list_del(&port->next);
4928
+ drm_dp_mst_topology_put_port(port);
4929
+ }
4930
+ mutex_unlock(&mgr->lock);
4931
+
4932
+ /* drop any tx slot msg */
4933
+ mutex_lock(&mstb->mgr->qlock);
4934
+ list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
4935
+ if (txmsg->dst != mstb)
4936
+ continue;
4937
+
4938
+ txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4939
+ list_del(&txmsg->next);
4940
+ wake_tx = true;
4941
+ }
4942
+ mutex_unlock(&mstb->mgr->qlock);
4943
+
4944
+ if (wake_tx)
4945
+ wake_up_all(&mstb->mgr->tx_waitq);
4946
+
4947
+ drm_dp_mst_put_mstb_malloc(mstb);
4948
+}
4949
+
4950
+static void drm_dp_delayed_destroy_work(struct work_struct *work)
4951
+{
4952
+ struct drm_dp_mst_topology_mgr *mgr =
4953
+ container_of(work, struct drm_dp_mst_topology_mgr,
4954
+ delayed_destroy_work);
4955
+ bool send_hotplug = false, go_again;
4956
+
31514957 /*
31524958 * Not a regular list traverse as we have to drop the destroy
3153
- * connector lock before destroying the connector, to avoid AB->BA
4959
+ * connector lock before destroying the mstb/port, to avoid AB->BA
31544960 * ordering between this lock and the config mutex.
31554961 */
3156
- for (;;) {
3157
- mutex_lock(&mgr->destroy_connector_lock);
3158
- port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
3159
- if (!port) {
3160
- mutex_unlock(&mgr->destroy_connector_lock);
3161
- break;
3162
- }
3163
- list_del(&port->next);
3164
- mutex_unlock(&mgr->destroy_connector_lock);
4962
+ do {
4963
+ go_again = false;
31654964
3166
- kref_init(&port->kref);
3167
- INIT_LIST_HEAD(&port->next);
4965
+ for (;;) {
4966
+ struct drm_dp_mst_branch *mstb;
31684967
3169
- mgr->cbs->destroy_connector(mgr, port->connector);
4968
+ mutex_lock(&mgr->delayed_destroy_lock);
4969
+ mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4970
+ struct drm_dp_mst_branch,
4971
+ destroy_next);
4972
+ if (mstb)
4973
+ list_del(&mstb->destroy_next);
4974
+ mutex_unlock(&mgr->delayed_destroy_lock);
31704975
3171
- drm_dp_port_teardown_pdt(port, port->pdt);
3172
- port->pdt = DP_PEER_DEVICE_NONE;
4976
+ if (!mstb)
4977
+ break;
31734978
3174
- if (!port->input && port->vcpi.vcpi > 0) {
3175
- drm_dp_mst_reset_vcpi_slots(mgr, port);
3176
- drm_dp_update_payload_part1(mgr);
3177
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4979
+ drm_dp_delayed_destroy_mstb(mstb);
4980
+ go_again = true;
31784981 }
31794982
3180
- kref_put(&port->kref, drm_dp_free_mst_port);
3181
- send_hotplug = true;
3182
- }
4983
+ for (;;) {
4984
+ struct drm_dp_mst_port *port;
4985
+
4986
+ mutex_lock(&mgr->delayed_destroy_lock);
4987
+ port = list_first_entry_or_null(&mgr->destroy_port_list,
4988
+ struct drm_dp_mst_port,
4989
+ next);
4990
+ if (port)
4991
+ list_del(&port->next);
4992
+ mutex_unlock(&mgr->delayed_destroy_lock);
4993
+
4994
+ if (!port)
4995
+ break;
4996
+
4997
+ drm_dp_delayed_destroy_port(port);
4998
+ send_hotplug = true;
4999
+ go_again = true;
5000
+ }
5001
+ } while (go_again);
5002
+
31835003 if (send_hotplug)
3184
- (*mgr->cbs->hotplug)(mgr);
5004
+ drm_kms_helper_hotplug_event(mgr->dev);
31855005 }
31865006
31875007 static struct drm_private_state *
31885008 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
31895009 {
3190
- struct drm_dp_mst_topology_state *state;
5010
+ struct drm_dp_mst_topology_state *state, *old_state =
5011
+ to_dp_mst_topology_state(obj->state);
5012
+ struct drm_dp_vcpi_allocation *pos, *vcpi;
31915013
3192
- state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
5014
+ state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
31935015 if (!state)
31945016 return NULL;
31955017
31965018 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
31975019
5020
+ INIT_LIST_HEAD(&state->vcpis);
5021
+
5022
+ list_for_each_entry(pos, &old_state->vcpis, next) {
5023
+ /* Prune leftover freed VCPI allocations */
5024
+ if (!pos->vcpi)
5025
+ continue;
5026
+
5027
+ vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
5028
+ if (!vcpi)
5029
+ goto fail;
5030
+
5031
+ drm_dp_mst_get_port_malloc(vcpi->port);
5032
+ list_add(&vcpi->next, &state->vcpis);
5033
+ }
5034
+
31985035 return &state->base;
5036
+
5037
+fail:
5038
+ list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
5039
+ drm_dp_mst_put_port_malloc(pos->port);
5040
+ kfree(pos);
5041
+ }
5042
+ kfree(state);
5043
+
5044
+ return NULL;
31995045 }
32005046
32015047 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
....@@ -3203,14 +5049,346 @@
32035049 {
32045050 struct drm_dp_mst_topology_state *mst_state =
32055051 to_dp_mst_topology_state(state);
5052
+ struct drm_dp_vcpi_allocation *pos, *tmp;
5053
+
5054
+ list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
5055
+ /* We only keep references to ports with non-zero VCPIs */
5056
+ if (pos->vcpi)
5057
+ drm_dp_mst_put_port_malloc(pos->port);
5058
+ kfree(pos);
5059
+ }
32065060
32075061 kfree(mst_state);
32085062 }
32095063
3210
-static const struct drm_private_state_funcs mst_state_funcs = {
5064
+static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
5065
+ struct drm_dp_mst_branch *branch)
5066
+{
5067
+ while (port->parent) {
5068
+ if (port->parent == branch)
5069
+ return true;
5070
+
5071
+ if (port->parent->port_parent)
5072
+ port = port->parent->port_parent;
5073
+ else
5074
+ break;
5075
+ }
5076
+ return false;
5077
+}
5078
+
5079
+static int
5080
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5081
+ struct drm_dp_mst_topology_state *state);
5082
+
5083
+static int
5084
+drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
5085
+ struct drm_dp_mst_topology_state *state)
5086
+{
5087
+ struct drm_dp_vcpi_allocation *vcpi;
5088
+ struct drm_dp_mst_port *port;
5089
+ int pbn_used = 0, ret;
5090
+ bool found = false;
5091
+
5092
+ /* Check that we have at least one port in our state that's downstream
5093
+ * of this branch, otherwise we can skip this branch
5094
+ */
5095
+ list_for_each_entry(vcpi, &state->vcpis, next) {
5096
+ if (!vcpi->pbn ||
5097
+ !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
5098
+ continue;
5099
+
5100
+ found = true;
5101
+ break;
5102
+ }
5103
+ if (!found)
5104
+ return 0;
5105
+
5106
+ if (mstb->port_parent)
5107
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
5108
+ mstb->port_parent->parent, mstb->port_parent,
5109
+ mstb);
5110
+ else
5111
+ DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
5112
+ mstb);
5113
+
5114
+ list_for_each_entry(port, &mstb->ports, next) {
5115
+ ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
5116
+ if (ret < 0)
5117
+ return ret;
5118
+
5119
+ pbn_used += ret;
5120
+ }
5121
+
5122
+ return pbn_used;
5123
+}
5124
+
5125
+static int
5126
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
5127
+ struct drm_dp_mst_topology_state *state)
5128
+{
5129
+ struct drm_dp_vcpi_allocation *vcpi;
5130
+ int pbn_used = 0;
5131
+
5132
+ if (port->pdt == DP_PEER_DEVICE_NONE)
5133
+ return 0;
5134
+
5135
+ if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
5136
+ bool found = false;
5137
+
5138
+ list_for_each_entry(vcpi, &state->vcpis, next) {
5139
+ if (vcpi->port != port)
5140
+ continue;
5141
+ if (!vcpi->pbn)
5142
+ return 0;
5143
+
5144
+ found = true;
5145
+ break;
5146
+ }
5147
+ if (!found)
5148
+ return 0;
5149
+
5150
+ /* This should never happen, as it means we tried to
5151
+ * set a mode before querying the full_pbn
5152
+ */
5153
+ if (WARN_ON(!port->full_pbn))
5154
+ return -EINVAL;
5155
+
5156
+ pbn_used = vcpi->pbn;
5157
+ } else {
5158
+ pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
5159
+ state);
5160
+ if (pbn_used <= 0)
5161
+ return pbn_used;
5162
+ }
5163
+
5164
+ if (pbn_used > port->full_pbn) {
5165
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
5166
+ port->parent, port, pbn_used,
5167
+ port->full_pbn);
5168
+ return -ENOSPC;
5169
+ }
5170
+
5171
+ DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
5172
+ port->parent, port, pbn_used, port->full_pbn);
5173
+
5174
+ return pbn_used;
5175
+}
5176
+
5177
+static inline int
5178
+drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
5179
+ struct drm_dp_mst_topology_state *mst_state)
5180
+{
5181
+ struct drm_dp_vcpi_allocation *vcpi;
5182
+ int avail_slots = 63, payload_count = 0;
5183
+
5184
+ list_for_each_entry(vcpi, &mst_state->vcpis, next) {
5185
+ /* Releasing VCPI is always OK-even if the port is gone */
5186
+ if (!vcpi->vcpi) {
5187
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
5188
+ vcpi->port);
5189
+ continue;
5190
+ }
5191
+
5192
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
5193
+ vcpi->port, vcpi->vcpi);
5194
+
5195
+ avail_slots -= vcpi->vcpi;
5196
+ if (avail_slots < 0) {
5197
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
5198
+ vcpi->port, mst_state,
5199
+ avail_slots + vcpi->vcpi);
5200
+ return -ENOSPC;
5201
+ }
5202
+
5203
+ if (++payload_count > mgr->max_payloads) {
5204
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
5205
+ mgr, mst_state, mgr->max_payloads);
5206
+ return -EINVAL;
5207
+ }
5208
+ }
5209
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
5210
+ mgr, mst_state, avail_slots,
5211
+ 63 - avail_slots);
5212
+
5213
+ return 0;
5214
+}
5215
+
5216
+/**
5217
+ * drm_dp_mst_add_affected_dsc_crtcs
5218
+ * @state: Pointer to the new struct drm_dp_mst_topology_state
5219
+ * @mgr: MST topology manager
5220
+ *
5221
+ * Whenever there is a change in mst topology
5222
+ * DSC configuration would have to be recalculated
5223
+ * therefore we need to trigger modeset on all affected
5224
+ * CRTCs in that topology
5225
+ *
5226
+ * See also:
5227
+ * drm_dp_mst_atomic_enable_dsc()
5228
+ */
5229
+int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
5230
+{
5231
+ struct drm_dp_mst_topology_state *mst_state;
5232
+ struct drm_dp_vcpi_allocation *pos;
5233
+ struct drm_connector *connector;
5234
+ struct drm_connector_state *conn_state;
5235
+ struct drm_crtc *crtc;
5236
+ struct drm_crtc_state *crtc_state;
5237
+
5238
+ mst_state = drm_atomic_get_mst_topology_state(state, mgr);
5239
+
5240
+ if (IS_ERR(mst_state))
5241
+ return PTR_ERR(mst_state);
5242
+
5243
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
5244
+
5245
+ connector = pos->port->connector;
5246
+
5247
+ if (!connector)
5248
+ return -EINVAL;
5249
+
5250
+ conn_state = drm_atomic_get_connector_state(state, connector);
5251
+
5252
+ if (IS_ERR(conn_state))
5253
+ return PTR_ERR(conn_state);
5254
+
5255
+ crtc = conn_state->crtc;
5256
+
5257
+ if (!crtc)
5258
+ continue;
5259
+
5260
+ if (!drm_dp_mst_dsc_aux_for_port(pos->port))
5261
+ continue;
5262
+
5263
+ crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
5264
+
5265
+ if (IS_ERR(crtc_state))
5266
+ return PTR_ERR(crtc_state);
5267
+
5268
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
5269
+ mgr, crtc);
5270
+
5271
+ crtc_state->mode_changed = true;
5272
+ }
5273
+ return 0;
5274
+}
5275
+EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
5276
+
5277
+/**
5278
+ * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
5279
+ * @state: Pointer to the new drm_atomic_state
5280
+ * @port: Pointer to the affected MST Port
5281
+ * @pbn: Newly recalculated bw required for link with DSC enabled
5282
+ * @pbn_div: Divider to calculate correct number of pbn per slot
5283
+ * @enable: Boolean flag to enable or disable DSC on the port
5284
+ *
5285
+ * This function enables DSC on the given Port
5286
+ * by recalculating its vcpi from pbn provided
5287
+ * and sets dsc_enable flag to keep track of which
5288
+ * ports have DSC enabled
5289
+ *
5290
+ */
5291
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
5292
+ struct drm_dp_mst_port *port,
5293
+ int pbn, int pbn_div,
5294
+ bool enable)
5295
+{
5296
+ struct drm_dp_mst_topology_state *mst_state;
5297
+ struct drm_dp_vcpi_allocation *pos;
5298
+ bool found = false;
5299
+ int vcpi = 0;
5300
+
5301
+ mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5302
+
5303
+ if (IS_ERR(mst_state))
5304
+ return PTR_ERR(mst_state);
5305
+
5306
+ list_for_each_entry(pos, &mst_state->vcpis, next) {
5307
+ if (pos->port == port) {
5308
+ found = true;
5309
+ break;
5310
+ }
5311
+ }
5312
+
5313
+ if (!found) {
5314
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5315
+ port, mst_state);
5316
+ return -EINVAL;
5317
+ }
5318
+
5319
+ if (pos->dsc_enabled == enable) {
5320
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5321
+ port, enable, pos->vcpi);
5322
+ vcpi = pos->vcpi;
5323
+ }
5324
+
5325
+ if (enable) {
5326
+ vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5327
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5328
+ port, vcpi);
5329
+ if (vcpi < 0)
5330
+ return -EINVAL;
5331
+ }
5332
+
5333
+ pos->dsc_enabled = enable;
5334
+
5335
+ return vcpi;
5336
+}
5337
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
5338
+/**
5339
+ * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5340
+ * atomic update is valid
5341
+ * @state: Pointer to the new &struct drm_dp_mst_topology_state
5342
+ *
5343
+ * Checks the given topology state for an atomic update to ensure that it's
5344
+ * valid. This includes checking whether there's enough bandwidth to support
5345
+ * the new VCPI allocations in the atomic update.
5346
+ *
5347
+ * Any atomic drivers supporting DP MST must make sure to call this after
5348
+ * checking the rest of their state in their
5349
+ * &drm_mode_config_funcs.atomic_check() callback.
5350
+ *
5351
+ * See also:
5352
+ * drm_dp_atomic_find_vcpi_slots()
5353
+ * drm_dp_atomic_release_vcpi_slots()
5354
+ *
5355
+ * Returns:
5356
+ *
5357
+ * 0 if the new state is valid, negative error code otherwise.
5358
+ */
5359
+int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5360
+{
5361
+ struct drm_dp_mst_topology_mgr *mgr;
5362
+ struct drm_dp_mst_topology_state *mst_state;
5363
+ int i, ret = 0;
5364
+
5365
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5366
+ if (!mgr->mst_state)
5367
+ continue;
5368
+
5369
+ ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5370
+ if (ret)
5371
+ break;
5372
+
5373
+ mutex_lock(&mgr->lock);
5374
+ ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
5375
+ mst_state);
5376
+ mutex_unlock(&mgr->lock);
5377
+ if (ret < 0)
5378
+ break;
5379
+ else
5380
+ ret = 0;
5381
+ }
5382
+
5383
+ return ret;
5384
+}
5385
+EXPORT_SYMBOL(drm_dp_mst_atomic_check);
5386
+
5387
+const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
32115388 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
32125389 .atomic_destroy_state = drm_dp_mst_destroy_state,
32135390 };
5391
+EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
32145392
32155393 /**
32165394 * drm_atomic_get_mst_topology_state: get MST topology state
....@@ -3230,9 +5408,6 @@
32305408 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
32315409 struct drm_dp_mst_topology_mgr *mgr)
32325410 {
3233
- struct drm_device *dev = mgr->dev;
3234
-
3235
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
32365411 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
32375412 }
32385413 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
....@@ -3258,12 +5433,29 @@
32585433 mutex_init(&mgr->lock);
32595434 mutex_init(&mgr->qlock);
32605435 mutex_init(&mgr->payload_lock);
3261
- mutex_init(&mgr->destroy_connector_lock);
5436
+ mutex_init(&mgr->delayed_destroy_lock);
5437
+ mutex_init(&mgr->up_req_lock);
5438
+ mutex_init(&mgr->probe_lock);
5439
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5440
+ mutex_init(&mgr->topology_ref_history_lock);
5441
+#endif
32625442 INIT_LIST_HEAD(&mgr->tx_msg_downq);
3263
- INIT_LIST_HEAD(&mgr->destroy_connector_list);
5443
+ INIT_LIST_HEAD(&mgr->destroy_port_list);
5444
+ INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5445
+ INIT_LIST_HEAD(&mgr->up_req_list);
5446
+
5447
+ /*
5448
+ * delayed_destroy_work will be queued on a dedicated WQ, so that any
5449
+ * requeuing will be also flushed when deiniting the topology manager.
5450
+ */
5451
+ mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
5452
+ if (mgr->delayed_destroy_wq == NULL)
5453
+ return -ENOMEM;
5454
+
32645455 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
32655456 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
3266
- INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
5457
+ INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5458
+ INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
32675459 init_waitqueue_head(&mgr->tx_waitq);
32685460 mgr->dev = dev;
32695461 mgr->aux = aux;
....@@ -3280,21 +5472,17 @@
32805472 if (!mgr->proposed_vcpis)
32815473 return -ENOMEM;
32825474 set_bit(0, &mgr->payload_mask);
3283
- if (test_calc_pbn_mode() < 0)
3284
- DRM_ERROR("MST PBN self-test failed\n");
32855475
32865476 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
32875477 if (mst_state == NULL)
32885478 return -ENOMEM;
32895479
32905480 mst_state->mgr = mgr;
5481
+ INIT_LIST_HEAD(&mst_state->vcpis);
32915482
3292
- /* max. time slots - one slot for MTP header */
3293
- mst_state->avail_slots = 63;
3294
-
3295
- drm_atomic_private_obj_init(&mgr->base,
5483
+ drm_atomic_private_obj_init(dev, &mgr->base,
32965484 &mst_state->base,
3297
- &mst_state_funcs);
5485
+ &drm_dp_mst_topology_state_funcs);
32985486
32995487 return 0;
33005488 }
....@@ -3306,8 +5494,13 @@
33065494 */
33075495 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
33085496 {
5497
+ drm_dp_mst_topology_mgr_set_mst(mgr, false);
33095498 flush_work(&mgr->work);
3310
- flush_work(&mgr->destroy_connector_work);
5499
+ /* The following will also drain any requeued work on the WQ. */
5500
+ if (mgr->delayed_destroy_wq) {
5501
+ destroy_workqueue(mgr->delayed_destroy_wq);
5502
+ mgr->delayed_destroy_wq = NULL;
5503
+ }
33115504 mutex_lock(&mgr->payload_lock);
33125505 kfree(mgr->payloads);
33135506 mgr->payloads = NULL;
....@@ -3318,37 +5511,58 @@
33185511 mgr->aux = NULL;
33195512 drm_atomic_private_obj_fini(&mgr->base);
33205513 mgr->funcs = NULL;
5514
+
5515
+ mutex_destroy(&mgr->delayed_destroy_lock);
5516
+ mutex_destroy(&mgr->payload_lock);
5517
+ mutex_destroy(&mgr->qlock);
5518
+ mutex_destroy(&mgr->lock);
5519
+ mutex_destroy(&mgr->up_req_lock);
5520
+ mutex_destroy(&mgr->probe_lock);
5521
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5522
+ mutex_destroy(&mgr->topology_ref_history_lock);
5523
+#endif
33215524 }
33225525 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
33235526
3324
-/* I2C device */
3325
-static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
3326
- int num)
5527
+static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
33275528 {
3328
- struct drm_dp_aux *aux = adapter->algo_data;
3329
- struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
3330
- struct drm_dp_mst_branch *mstb;
5529
+ int i;
5530
+
5531
+ if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5532
+ return false;
5533
+
5534
+ for (i = 0; i < num - 1; i++) {
5535
+ if (msgs[i].flags & I2C_M_RD ||
5536
+ msgs[i].len > 0xff)
5537
+ return false;
5538
+ }
5539
+
5540
+ return msgs[num - 1].flags & I2C_M_RD &&
5541
+ msgs[num - 1].len <= 0xff;
5542
+}
5543
+
5544
+static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
5545
+{
5546
+ int i;
5547
+
5548
+ for (i = 0; i < num - 1; i++) {
5549
+ if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
5550
+ msgs[i].len > 0xff)
5551
+ return false;
5552
+ }
5553
+
5554
+ return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
5555
+}
5556
+
5557
+static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
5558
+ struct drm_dp_mst_port *port,
5559
+ struct i2c_msg *msgs, int num)
5560
+{
33315561 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
33325562 unsigned int i;
3333
- bool reading = false;
33345563 struct drm_dp_sideband_msg_req_body msg;
33355564 struct drm_dp_sideband_msg_tx *txmsg = NULL;
33365565 int ret;
3337
-
3338
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3339
- if (!mstb)
3340
- return -EREMOTEIO;
3341
-
3342
- /* construct i2c msg */
3343
- /* see if last msg is a read */
3344
- if (msgs[num - 1].flags & I2C_M_RD)
3345
- reading = true;
3346
-
3347
- if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
3348
- DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
3349
- ret = -EIO;
3350
- goto out;
3351
- }
33525566
33535567 memset(&msg, 0, sizeof(msg));
33545568 msg.req_type = DP_REMOTE_I2C_READ;
....@@ -3377,7 +5591,7 @@
33775591 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
33785592 if (ret > 0) {
33795593
3380
- if (txmsg->reply.reply_type == 1) { /* got a NAK back */
5594
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
33815595 ret = -EREMOTEIO;
33825596 goto out;
33835597 }
....@@ -3390,7 +5604,79 @@
33905604 }
33915605 out:
33925606 kfree(txmsg);
3393
- drm_dp_put_mst_branch_device(mstb);
5607
+ return ret;
5608
+}
5609
+
5610
+static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
5611
+ struct drm_dp_mst_port *port,
5612
+ struct i2c_msg *msgs, int num)
5613
+{
5614
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5615
+ unsigned int i;
5616
+ struct drm_dp_sideband_msg_req_body msg;
5617
+ struct drm_dp_sideband_msg_tx *txmsg = NULL;
5618
+ int ret;
5619
+
5620
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5621
+ if (!txmsg) {
5622
+ ret = -ENOMEM;
5623
+ goto out;
5624
+ }
5625
+ for (i = 0; i < num; i++) {
5626
+ memset(&msg, 0, sizeof(msg));
5627
+ msg.req_type = DP_REMOTE_I2C_WRITE;
5628
+ msg.u.i2c_write.port_number = port->port_num;
5629
+ msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
5630
+ msg.u.i2c_write.num_bytes = msgs[i].len;
5631
+ msg.u.i2c_write.bytes = msgs[i].buf;
5632
+
5633
+ memset(txmsg, 0, sizeof(*txmsg));
5634
+ txmsg->dst = mstb;
5635
+
5636
+ drm_dp_encode_sideband_req(&msg, txmsg);
5637
+ drm_dp_queue_down_tx(mgr, txmsg);
5638
+
5639
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5640
+ if (ret > 0) {
5641
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5642
+ ret = -EREMOTEIO;
5643
+ goto out;
5644
+ }
5645
+ } else {
5646
+ goto out;
5647
+ }
5648
+ }
5649
+ ret = num;
5650
+out:
5651
+ kfree(txmsg);
5652
+ return ret;
5653
+}
5654
+
5655
+/* I2C device */
5656
+static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
5657
+ struct i2c_msg *msgs, int num)
5658
+{
5659
+ struct drm_dp_aux *aux = adapter->algo_data;
5660
+ struct drm_dp_mst_port *port =
5661
+ container_of(aux, struct drm_dp_mst_port, aux);
5662
+ struct drm_dp_mst_branch *mstb;
5663
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5664
+ int ret;
5665
+
5666
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5667
+ if (!mstb)
5668
+ return -EREMOTEIO;
5669
+
5670
+ if (remote_i2c_read_ok(msgs, num)) {
5671
+ ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
5672
+ } else if (remote_i2c_write_ok(msgs, num)) {
5673
+ ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
5674
+ } else {
5675
+ DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
5676
+ ret = -EIO;
5677
+ }
5678
+
5679
+ drm_dp_mst_topology_put_mstb(mstb);
33945680 return ret;
33955681 }
33965682
....@@ -3409,22 +5695,26 @@
34095695
34105696 /**
34115697 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
3412
- * @aux: DisplayPort AUX channel
5698
+ * @port: The port to add the I2C bus on
34135699 *
34145700 * Returns 0 on success or a negative error code on failure.
34155701 */
3416
-static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
5702
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
34175703 {
5704
+ struct drm_dp_aux *aux = &port->aux;
5705
+ struct device *parent_dev = port->mgr->dev->dev;
5706
+
34185707 aux->ddc.algo = &drm_dp_mst_i2c_algo;
34195708 aux->ddc.algo_data = aux;
34205709 aux->ddc.retries = 3;
34215710
34225711 aux->ddc.class = I2C_CLASS_DDC;
34235712 aux->ddc.owner = THIS_MODULE;
3424
- aux->ddc.dev.parent = aux->dev;
3425
- aux->ddc.dev.of_node = aux->dev->of_node;
5713
+ /* FIXME: set the kdev of the port's connector as parent */
5714
+ aux->ddc.dev.parent = parent_dev;
5715
+ aux->ddc.dev.of_node = parent_dev->of_node;
34265716
3427
- strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
5717
+ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
34285718 sizeof(aux->ddc.name));
34295719
34305720 return i2c_add_adapter(&aux->ddc);
....@@ -3432,9 +5722,180 @@
34325722
34335723 /**
34345724 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
3435
- * @aux: DisplayPort AUX channel
5725
+ * @port: The port to remove the I2C bus from
34365726 */
3437
-static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
5727
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
34385728 {
3439
- i2c_del_adapter(&aux->ddc);
5729
+ i2c_del_adapter(&port->aux.ddc);
34405730 }
5731
+
5732
+/**
5733
+ * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5734
+ * @port: The port to check
5735
+ *
5736
+ * A single physical MST hub object can be represented in the topology
5737
+ * by multiple branches, with virtual ports between those branches.
5738
+ *
5739
+ * As of DP1.4, An MST hub with internal (virtual) ports must expose
5740
+ * certain DPCD registers over those ports. See sections 2.6.1.1.1
5741
+ * and 2.6.1.1.2 of Display Port specification v1.4 for details.
5742
+ *
5743
+ * May acquire mgr->lock
5744
+ *
5745
+ * Returns:
5746
+ * true if the port is a virtual DP peer device, false otherwise
5747
+ */
5748
+static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5749
+{
5750
+ struct drm_dp_mst_port *downstream_port;
5751
+
5752
+ if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5753
+ return false;
5754
+
5755
+ /* Virtual DP Sink (Internal Display Panel) */
5756
+ if (port->port_num >= 8)
5757
+ return true;
5758
+
5759
+ /* DP-to-HDMI Protocol Converter */
5760
+ if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5761
+ !port->mcs &&
5762
+ port->ldps)
5763
+ return true;
5764
+
5765
+ /* DP-to-DP */
5766
+ mutex_lock(&port->mgr->lock);
5767
+ if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5768
+ port->mstb &&
5769
+ port->mstb->num_ports == 2) {
5770
+ list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5771
+ if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5772
+ !downstream_port->input) {
5773
+ mutex_unlock(&port->mgr->lock);
5774
+ return true;
5775
+ }
5776
+ }
5777
+ }
5778
+ mutex_unlock(&port->mgr->lock);
5779
+
5780
+ return false;
5781
+}
5782
+
5783
/**
 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
 * @port: The port to check. A leaf of the MST tree with an attached display.
 *
 * Depending on the situation, DSC may be enabled via the endpoint aux,
 * the immediately upstream aux, or the connector's physical aux.
 *
 * This is both the correct aux to read DSC_CAPABILITY and the
 * correct aux to write DSC_ENABLED.
 *
 * This operation can be expensive (up to four aux reads), so
 * the caller should cache the return.
 *
 * Returns:
 * NULL if DSC cannot be enabled on this port, otherwise the aux device
 */
struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *immediate_upstream_port;
	struct drm_dp_mst_port *fec_port;
	struct drm_dp_desc desc = {};
	u8 endpoint_fec;
	u8 endpoint_dsc;

	if (!port)
		return NULL;

	/* The branch device's own upstream port, i.e. the hop directly
	 * above @port's parent branch; NULL when the parent is the
	 * primary branch device. */
	if (port->parent->port_parent)
		immediate_upstream_port = port->parent->port_parent;
	else
		immediate_upstream_port = NULL;

	/* Walk every hop from @port back towards the primary device. */
	fec_port = immediate_upstream_port;
	while (fec_port) {
		/*
		 * Each physical link (i.e. not a virtual port) between the
		 * output and the primary device must support FEC
		 */
		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
		    !fec_port->fec_capable)
			return NULL;

		fec_port = fec_port->parent->port_parent;
	}

	/* DP-to-DP peer device */
	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
		u8 upstream_dsc;

		/* Probe DSC/FEC capability of the endpoint and DSC
		 * capability of the upstream (virtual DPCD) port. */
		if (drm_dp_dpcd_read(&port->aux,
				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&port->aux,
				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
			return NULL;
		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
			return NULL;

		/* Endpoint decompression with DP-to-DP peer device.
		 * NOTE(review): 0x2 is the DSC-passthrough bit of
		 * DP_DSC_SUPPORT (DP_DSC_PASSTHROUGH_IS_SUPPORTED in newer
		 * headers) — consider using the named macro if available. */
		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
		    (endpoint_fec & DP_FEC_CAPABLE) &&
		    (upstream_dsc & 0x2) /* DSC passthrough */)
			return &port->aux;

		/* Virtual DPCD decompression with DP-to-DP peer device */
		return &immediate_upstream_port->aux;
	}

	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
	if (drm_dp_mst_is_virtual_dpcd(port))
		return &port->aux;

	/*
	 * Synaptics quirk
	 * Applies to ports for which:
	 * - Physical aux has Synaptics OUI
	 * - DPv1.4 or higher
	 * - Port is on primary branch device
	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
	 */
	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
		return NULL;

	if (drm_dp_has_quirk(&desc, 0,
			     DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
	    port->parent == port->mgr->mst_primary) {
		u8 downstreamport;

		/* NOTE(review): this uses `< 0` while the other DPCD reads
		 * in this function use `!= 1`; a zero-byte read would pass
		 * here — confirm whether the looser check is intentional. */
		if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
				     &downstreamport, 1) < 0)
			return NULL;

		if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
		    ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
			return port->mgr->aux;
	}

	/*
	 * The check below verifies if the MST sink
	 * connected to the GPU is capable of DSC -
	 * therefore the endpoint needs to be
	 * both DSC and FEC capable.
	 */
	if (drm_dp_dpcd_read(&port->aux,
			     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
		return NULL;
	if (drm_dp_dpcd_read(&port->aux,
			     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
		return NULL;
	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
	    (endpoint_fec & DP_FEC_CAPABLE))
		return &port->aux;

	return NULL;
}
EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);