/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>

#include <drm/drm_fixed.h>
/*
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband messages.
 */
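/*
 * Illustrative only (not part of the original file): a rough sketch of how a
 * driver is expected to drive these helpers; the ordering below is an
 * assumption based on the exported entry points in this file.
 *
 *   - on long HPD, if the sink reports MST capability, call
 *     drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *   - forward short HPD IRQs to drm_dp_mst_hpd_irq();
 *   - per stream, reserve bandwidth with drm_dp_mst_allocate_vcpi() and
 *     commit it with drm_dp_update_payload_part1()/part2();
 *   - on unplug, call drm_dp_mst_topology_mgr_set_mst(mgr, false).
 */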
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
	int number_of_bits = num_nibbles * 4;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x10) == 0x10)

	while (number_of_bits != 0) {
		if ((remainder & 0x10) != 0)

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
	int number_of_bits = number_of_bytes * 8;

	while (number_of_bits != 0) {
		remainder |= (data[array_index] & bitmask) >> bitshift;
		if ((remainder & 0x100) == 0x100)

	while (number_of_bits != 0) {
		if ((remainder & 0x100) != 0)

	return remainder & 0xff;

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
	size += (hdr->lct / 2);

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		     (hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
	len += ((buf[0] & 0xf0) >> 4) / 2;

	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		buf[idx] = (req->u.dpcd_read.num_bytes);
	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		buf[idx] = (req->u.dpcd_write.num_bytes);
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;
			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		buf[idx] = (req->u.i2c_write.num_bytes);
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
	crc4 = drm_dp_msg_data_crc4(msg, len);

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
	struct drm_dp_sideband_msg_hdr recv_hdr;
	ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);

	/* get length contained in this portion */
	msg->curchunk_len = recv_hdr.msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (recv_hdr.somt && msg->have_somt)

	memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
	msg->have_somt = true;

	msg->have_eomt = true;

	/* copy the bytes for the remainder of this header chunk */
	msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
	memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
		if (idx > raw->curlen)
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		if (idx > raw->curlen)
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			if (idx > raw->curlen)
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			if (idx > raw->curlen)
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
		if (idx > raw->curlen)

	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);

	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	if (idx > raw->curlen)
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];

	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);

	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	if (idx > raw->curlen)
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	if (idx > raw->curlen)
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	if (idx > raw->curlen)
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	if (idx > raw->curlen)

	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);

	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	if (idx > raw->curlen)

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	if (idx > raw->curlen)

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);

	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn)
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;

	mutex_unlock(&mgr->payload_lock);

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);

	mutex_unlock(&mgr->payload_lock);
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
	mutex_lock(&mgr->qlock);
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
	mutex_unlock(&mgr->qlock);

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
	mutex_lock(&mstb->mgr->qlock);

	if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;

	mutex_unlock(&mgr->qlock);

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);

	memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	cancel_work_sync(&mstb->mgr->work);

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;

	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;

	mutex_unlock(&mstb->mgr->qlock);

	wake_up(&mstb->mgr->tx_waitq);

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
	struct drm_dp_mst_branch *mstb;

	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
	case DP_PEER_DEVICE_MST_BRANCHING:
		drm_dp_put_mst_branch_device(mstb);

static void drm_dp_destroy_port(struct kref *kref)
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	port->vcpi.num_slots = 0;
	(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
	drm_dp_port_teardown_pdt(port, port->pdt);

	if (!port->input && port->vcpi.vcpi > 0)
		drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);

	(*mgr->cbs->hotplug)(mgr);

static void drm_dp_put_port(struct drm_dp_mst_port *port)
	kref_put(&port->kref, drm_dp_destroy_port);
static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);

	list_for_each_entry(port, &mstb->ports, next) {
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);

		mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
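/*
 * Illustrative note (not in the original source): LCT counts the link depth
 * from the source, so the primary branch is LCT 1 and carries no RAD. A
 * branch one hop below it has LCT 2 and one RAD nibble, LCT 3 has two
 * nibbles, and so on - which is why the sideband header reserves lct / 2
 * bytes for the RAD.
 */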
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
	int lct = port->parent->lct;

	memcpy(rad, port->parent->rad, idx);
	shift = (lct % 2) ? 4 : 0;

	rad[idx] |= port->port_num << shift;

/*
 * return sends link address for new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
	bool send_link = false;

	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
				   struct drm_dp_mst_port *port)
	if (port->dpcd_rev >= 0x12) {
		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
		if (!port->guid_valid) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
			port->guid_valid = true;

static void build_mst_prop_path(struct drm_dp_mst_port *port,
				struct drm_dp_mst_branch *mstb,
				size_t proppath_size)
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = mstb->rad[i / 2] >> shift;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);

	snprintf(temp, sizeof(temp), "-%d", port->port_num);
	strlcat(proppath, temp, proppath_size);

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_dp_link_addr_reply_port *port_msg)
	struct drm_dp_mst_port *port;
	bool created = false;

	port = drm_dp_get_port(mstb, port_msg->port_number);

	port = kzalloc(sizeof(*port), GFP_KERNEL);

	kref_init(&port->kref);
	port->parent = mstb;
	port->port_num = port_msg->port_number;
	port->mgr = mstb->mgr;
	port->aux.name = "DPMST";
	port->aux.dev = dev;

	old_pdt = port->pdt;
	old_ddps = port->ddps;

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
	memcpy(port->guid, port_msg->peer_guid, 16);

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	mutex_lock(&mstb->mgr->lock);
	kref_get(&port->kref);
	list_add(&port->next, &mstb->ports);
	mutex_unlock(&mstb->mgr->lock);

	if (old_ddps != port->ddps) {
		drm_dp_check_port_guid(mstb, port);
		drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);

		port->guid_valid = false;
		port->available_pbn = 0;

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		drm_dp_send_link_address(mstb->mgr, port->mstb);
		port->mstb->link_address_sent = true;

	if (created && !port->input) {
		build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);

	/* put reference to this port */
	drm_dp_put_port(port);
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
	struct drm_dp_mst_port *port;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		drm_dp_check_port_guid(mstb, port);

		port->guid_valid = false;
		port->available_pbn = 0;

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))

	drm_dp_put_port(port);

	queue_work(system_long_wq, &mstb->mgr->work);

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;

	/* find the port by iterating down */
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = rad[i / 2] >> shift;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {

		DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);

	kref_get(&mstb->kref);

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
	struct drm_dp_mst_port *port;

	if (!mstb->link_address_sent) {
		drm_dp_send_link_address(mgr, mstb);
		mstb->link_address_sent = true;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		drm_dp_check_and_send_link_address(mgr, port->mstb);

static void drm_dp_mst_link_probe_work(struct work_struct *work)
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);

	drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;

		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,

		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {

			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

	} while (total > 0);

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
	struct drm_dp_mst_branch *mstb = txmsg->dst;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);

		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)

		mstb->tx_slots[txmsg->seqno] = txmsg;

	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;

	memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;

/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
		DRM_DEBUG_KMS("sideband msg failed to send\n");

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
/* must be called holding qlock */
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_sideband_msg_tx *txmsg;

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;

	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);

		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);

		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up(&mgr->tx_waitq);

	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_sideband_msg_tx *txmsg;

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_upq)) {
		mgr->tx_up_in_progress = false;

	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, true);

		/* up txmsgs aren't put in slots - so free after we send it */
		list_del(&txmsg->next);

		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	mgr->tx_up_in_progress = true;

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_link_address(txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");

		DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
		for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
			DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
				      txmsg->reply.u.link_addr.ports[i].input_port,
				      txmsg->reply.u.link_addr.ports[i].peer_device_type,
				      txmsg->reply.u.link_addr.ports[i].port_number,
				      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
				      txmsg->reply.u.link_addr.ports[i].mcs,
				      txmsg->reply.u.link_addr.ports[i].ddps,
				      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
				      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
				      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
		for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
			drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);

		(*mgr->cbs->hotplug)(mgr);

		DRM_DEBUG_KMS("link address failed %d\n", ret);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");

		if (port->port_num != txmsg->reply.u.path_resources.port_number)
			DRM_ERROR("got incorrect port in response\n");
		DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
			      txmsg->reply.u.path_resources.avail_payload_bw_number);
		port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_allocate_payload(txmsg, port->port_num,

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);

		if (txmsg->reply.reply_type == 1) {

	drm_dp_put_mst_branch_device(mstb);

static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_payload *payload)
	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
		payload->payload_state = 0;

	payload->payload_state = DP_PAYLOAD_LOCAL;

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       struct drm_dp_payload *payload)
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);

	payload->payload_state = DP_PAYLOAD_REMOTE;

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					struct drm_dp_payload *payload)
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	drm_dp_payload_send_msg(mgr, port, id, 0);

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_payload *payload)
	payload->payload_state = 0;
/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * after calling this the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;

			req_payload.num_slots = 0;

		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
			mgr->payloads[i].start_slot = req_payload.start_slot;

		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
				mgr->payloads[i].start_slot = 0;

			mgr->payloads[i].payload_state = req_payload.payload_state;

		cur_slots += req_payload.num_slots;

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			DRM_DEBUG_KMS("removing payload %d\n", i);
			for (j = i; j < mgr->max_payloads - 1; j++) {
				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
					set_bit(j + 1, &mgr->payload_mask);

					clear_bit(j + 1, &mgr->payload_mask);

			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
			clear_bit(mgr->max_payloads, &mgr->payload_mask);

	mutex_unlock(&mgr->payload_lock);

EXPORT_SYMBOL(drm_dp_update_payload_part1);

/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);

			mutex_unlock(&mgr->payload_lock);

	mutex_unlock(&mgr->payload_lock);

EXPORT_SYMBOL(drm_dp_update_payload_part2);
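/*
 * Illustrative usage sketch (not part of the original file). After VCPIs have
 * been allocated with drm_dp_mst_allocate_vcpi(), a driver commits them in
 * two phases around its own ACT handling; wait_for_act_handled() is a
 * hypothetical driver helper.
 *
 *	drm_dp_update_payload_part1(mgr);
 *	// program the stream source, then trigger ACT in hardware
 *	wait_for_act_handled(dev);
 *	drm_dp_update_payload_part2(mgr);
 */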
#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
		if (txmsg->reply.reply_type == 1) {

	drm_dp_put_mst_branch_device(mstb);

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 1;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
	if (!mgr->tx_up_in_progress) {
		process_single_up_tx_qlock(mgr);

	mutex_unlock(&mgr->qlock);

static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
	switch (dp_link_bw) {
		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
			      dp_link_bw, dp_link_count);

	case DP_LINK_BW_1_62:
		*out = 3 * dp_link_count;
	case DP_LINK_BW_2_7:
		*out = 5 * dp_link_count;
	case DP_LINK_BW_5_4:
		*out = 10 * dp_link_count;
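/*
 * Illustrative note (not in the original source): the switch above converts
 * the link rate to PBN per MTP time slot for a single lane, so e.g. a
 * 2.7 Gbps link trained with 4 lanes yields 5 * 4 = 20 PBN per slot.
 * drm_dp_find_vcpi_slots() later divides a stream's PBN by this per-slot
 * value (mgr->pbn_div) to obtain a slot count.
 */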
/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
		WARN_ON(mgr->mst_primary);

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");

		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,

		mgr->total_pbn = 2560;
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
		mgr->avail_slots = mgr->total_slots;

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);

		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);

		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
		if (!mgr->guid_valid) {
			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
			mgr->guid_valid = true;

		queue_work(system_long_wq, &mgr->work);

		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);

		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);

	mutex_unlock(&mgr->lock);

	drm_dp_put_mst_branch_device(mstb);

EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
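/*
 * Illustrative usage sketch (not part of the original file): a driver's
 * long-HPD path typically looks like the following, where is_mst_sink() is a
 * hypothetical helper that checks DP_MSTM_CAP in the sink's DPCD.
 *
 *	if (plugged && is_mst_sink(mgr->aux))
 *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	else
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 */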
/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);

EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch DPCD and see if the device is still there,
 * if it is, it will rewrite the MSTM control bits, and return.
 *
 * if the device fails this returns -1, and the driver should do
 * a full MST reprobe, in case we were undocked.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");

	mutex_unlock(&mgr->lock);

EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
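/*
 * Illustrative usage sketch (not part of the original file): drivers call the
 * suspend/resume pair from their power-management hooks and fall back to a
 * full reprobe when the sink went away while suspended.
 *
 *	// system suspend
 *	drm_dp_mst_topology_mgr_suspend(mgr);
 *
 *	// system resume
 *	if (drm_dp_mst_topology_mgr_resume(mgr) < 0)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);	// then reprobe the link
 */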
static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
	int replylen, origlen, curreply;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);

	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);

	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
			DRM_DEBUG_KMS("failed to read a chunk\n");

		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
			DRM_DEBUG_KMS("failed to build sideband msg\n");

static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
	drm_dp_get_one_sb_msg(mgr, false);

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);

			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_put_mst_branch_device(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
		if (txmsg->reply.reply_type == 1) {
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
		drm_dp_put_mst_branch_device(mstb);

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		wake_up(&mgr->tx_waitq);

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
	drm_dp_get_one_sb_msg(mgr, true);

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->up_req_recv.initial_hdr.lct,
						    mgr->up_req_recv.initial_hdr.rad);
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
			drm_dp_update_port(mstb, &msg.u.conn_stat);
			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
			(*mgr->cbs->hotplug)(mgr);

		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);

		drm_dp_put_mst_branch_device(mstb);
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);

	drm_dp_mst_kick_tx(mgr);

EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
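/*
 * Illustrative usage sketch (not part of the original file): a short-pulse
 * HPD handler reads the ESI bytes itself and forwards them here; the exact
 * ESI length and the ack write back to the sink are driver policy.
 *
 *	u8 esi[14];
 *	bool handled = false;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, sizeof(esi));
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	// if handled, ack the serviced ESI bits back to the sink
 */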
2166 * drm_dp_mst_detect_port() - get connection status for an MST port
2167 * @mgr: manager for this port
2168 * @port: unverified pointer to a port
2170 * This returns the current connection state for a port. It validates the
2171 * port pointer still exists so the caller doesn't require a reference
2173 enum drm_connector_status
drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr
*mgr
, struct drm_dp_mst_port
*port
)
2175 enum drm_connector_status status
= connector_status_disconnected
;
2177 /* we need to search for the port in the mgr in case its gone */
2178 port
= drm_dp_get_validated_port_ref(mgr
, port
);
2180 return connector_status_disconnected
;
2185 switch (port
->pdt
) {
2186 case DP_PEER_DEVICE_NONE
:
2187 case DP_PEER_DEVICE_MST_BRANCHING
:
2190 case DP_PEER_DEVICE_SST_SINK
:
2191 status
= connector_status_connected
;
2193 case DP_PEER_DEVICE_DP_LEGACY_CONV
:
2195 status
= connector_status_connected
;
2199 drm_dp_put_port(port
);
2202 EXPORT_SYMBOL(drm_dp_mst_detect_port
);

/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector. It validates
 * the pointer still exists so the caller doesn't require a reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return NULL;

	edid = drm_get_edid(connector, &port->aux.ddc);
	drm_dp_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
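
/*
 * Example (sketch for a .get_modes() hook, using the same hypothetical
 * wrapper as above): the EDID is allocated by drm_get_edid() underneath and
 * must be freed by the caller once the modes have been added.
 *
 *	struct edid *edid;
 *	int num_modes = 0;
 *
 *	edid = drm_dp_mst_get_edid(connector, c->mgr, c->port);
 *	if (edid) {
 *		drm_mode_connector_update_edid_property(connector, edid);
 *		num_modes = drm_add_edid_modes(connector, edid);
 *		kfree(edid);
 *	}
 *	return num_modes;
 */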

/**
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 *
 * Returns the number of time slots needed, or -ENOSPC if the PBN value
 * exceeds the bandwidth still available on the link.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
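
/*
 * Worked example (link parameters assumed for illustration): a 5.4 Gbit/s,
 * 4-lane MST link yields pbn_div = 40 PBN per time slot, so a stream that
 * needs 689 PBN occupies DIV_ROUND_UP(689, 40) = 18 of the 63 usable slots.
 */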

static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn)
{
	int ret;
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
	vcpi->num_slots = num_slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 *
 * Returns true on success, false on failure.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			*slots = port->vcpi.num_slots;
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
	*slots = port->vcpi.num_slots;

	drm_dp_put_port(port);
	return true;
out:
	drm_dp_put_port(port);
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
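
/*
 * Example (sketch of the usual enable-time flow; error handling and the
 * driver-specific mode/bpp values are omitted): compute the PBN for the
 * mode, allocate a VCPI, then write the first half of the payload table.
 *
 *	int pbn, slots;
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
 *	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
 *		return;
 *	drm_dp_update_payload_part1(mgr);
 */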

/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);

/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
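
/*
 * Example (sketch of the usual disable-time ordering, names assumed): the
 * slot count is reset before the payload table is rewritten, and the VCPI
 * itself is only released once the payload has been torn down on the link.
 *
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);
 *	drm_dp_update_payload_part1(mgr);
 *	... disable the stream and wait for the ACT ...
 *	drm_dp_update_payload_part2(mgr);
 *	drm_dp_mst_deallocate_vcpi(mgr, port);
 */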

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}

/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);
	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
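
/*
 * Example (sketch, assuming drm_dp_update_payload_part1() has already been
 * sent and the stream enabled): once the ACT has been seen the driver
 * completes the payload programming.
 *
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 */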

/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	fixed20_12 pix_bw;
	fixed20_12 fbpp;
	fixed20_12 result;
	fixed20_12 margin, tmp;
	u32 res;

	pix_bw.full = dfixed_const(clock);
	fbpp.full = dfixed_const(bpp);
	tmp.full = dfixed_const(8);
	fbpp.full = dfixed_div(fbpp, tmp);

	result.full = dfixed_mul(pix_bw, fbpp);
	margin.full = dfixed_const(54);
	tmp.full = dfixed_const(64);

	margin.full = dfixed_div(margin, tmp);
	result.full = dfixed_div(result, margin);

	margin.full = dfixed_const(1006);
	tmp.full = dfixed_const(1000);
	margin.full = dfixed_div(margin, tmp);
	result.full = dfixed_mul(result, margin);

	result.full = dfixed_div(result, tmp);
	result.full = dfixed_ceil(result);
	res = dfixed_trunc(result);
	return res;
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
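
/*
 * Worked example of the formula above: a 154 MHz pixel clock at 30 bpp is
 * 154000 * 30 / 8 = 577500 kB/s of pixel data. One PBN is 54/64 MB/s and
 * the spec adds a 0.6% margin, so the result is
 * ceil(577.5 * 64/54 * 1.006) = 689 PBN, which is the value
 * test_calc_pbn_mode() below expects for that mode.
 */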

static int test_calc_pbn_mode(void)
{
	int ret;

	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689)
		return -EINVAL;
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047)
		return -EINVAL;
	return 0;
}

/* we want to kick the TX after we've ack the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  u8 *buf)
{
	int ret;
	int i;

	for (i = 0; i < 4; i++) {
		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
		if (ret != 16)
			return false;
	}
	return true;
}

/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
	mutex_unlock(&mgr->lock);

	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
		} else
			seq_printf(m, "vcpi %d:unused\n", i);
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[64];
		bool bret;
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: ");
		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: ");
		for (i = 0; i < 2; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: ");
		for (i = 0; i < 1; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");

		bret = dump_dp_payload_table(mgr, buf);
		if (bret) {
			seq_printf(m, "payload table: ");
			for (i = 0; i < 63; i++)
				seq_printf(m, "%02x ", buf[i]);
			seq_printf(m, "\n");
		}
	}
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
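
/*
 * Example (sketch of a debugfs hookup; my_device and its mst_mgr member are
 * hypothetical): the dump is normally exposed through a seq_file show
 * callback.
 *
 *	static int my_mst_topology_show(struct seq_file *m, void *unused)
 *	{
 *		struct my_device *dev = m->private;
 *
 *		drm_dp_mst_dump_topology(m, &dev->mst_mgr);
 *		return 0;
 *	}
 */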

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_upq);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	test_calc_pbn_mode();
	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
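
/*
 * Example (sketch of probe-time setup; the transaction size, payload count
 * and the my_port/my_connector names are assumptions, not requirements of
 * the helper): the manager is usually embedded in a per-port driver struct.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_port->mst_mgr, drm_dev->dev,
 *					   &my_port->aux, 16, 4,
 *					   my_connector->base.id);
 *	if (ret)
 *		return ret;
 *
 * The matching drm_dp_mst_topology_mgr_destroy() below belongs in the
 * driver's teardown path.
 */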

/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	bool reading = false;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	/* construct i2c msg */
	/* see if last msg is a read */
	if (msgs[num - 1].flags & I2C_M_RD)
		reading = true;

	if (!reading) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}