Line data Source code
1 : /*
2 : * Copyright © 2014 Red Hat.
3 : *
4 : * Permission to use, copy, modify, distribute, and sell this software and its
5 : * documentation for any purpose is hereby granted without fee, provided that
6 : * the above copyright notice appear in all copies and that both that copyright
7 : * notice and this permission notice appear in supporting documentation, and
8 : * that the name of the copyright holders not be used in advertising or
9 : * publicity pertaining to distribution of the software without specific,
10 : * written prior permission. The copyright holders make no representations
11 : * about the suitability of this software for any purpose. It is provided "as
12 : * is" without express or implied warranty.
13 : *
14 : * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 : * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 : * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 : * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 : * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 : * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 : * OF THIS SOFTWARE.
21 : */
22 : #ifndef _DRM_DP_MST_HELPER_H_
23 : #define _DRM_DP_MST_HELPER_H_
24 :
25 : #include <linux/types.h>
26 : #include <drm/display/drm_dp_helper.h>
27 : #include <drm/drm_atomic.h>
28 :
29 : #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
30 : #include <linux/stackdepot.h>
31 : #include <linux/timekeeping.h>
32 :
/* Whether a topology refcount history entry recorded a get or a put. */
enum drm_dp_mst_topology_ref_type {
	DRM_DP_MST_TOPOLOGY_REF_GET,
	DRM_DP_MST_TOPOLOGY_REF_PUT,
};

/*
 * Debug log of topology reference gets/puts for a port or branch device,
 * only compiled in when CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is enabled.
 */
struct drm_dp_mst_topology_ref_history {
	struct drm_dp_mst_topology_ref_entry {
		enum drm_dp_mst_topology_ref_type type;	/* get or put */
		int count;			/* refs recorded in this entry */
		ktime_t ts_nsec;		/* timestamp, in ns */
		depot_stack_handle_t backtrace;	/* stack trace of the ref site */
	} *entries;	/* dynamically sized array of entries */
	int len;	/* number of valid elements in @entries */
};
47 : #endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
48 :
49 : struct drm_dp_mst_branch;
50 :
/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel.
 * @aligned_pbn: PBN aligned with slot size.
 * @num_slots: number of slots for this PBN.
 */
struct drm_dp_vcpi {
	int vcpi;
	int pbn;
	int aligned_pbn;
	int num_slots;
};
64 :
/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mcs: message capability status - DP 1.2 spec. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @pdt: Peer Device Type. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ldps: Legacy Device Plug Status. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @dpcd_rev: DPCD revision of device on this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_streams: Number of simultaneous streams. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @full_pbn: Max possible bandwidth for this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to device connected to this port, protected
 * by &drm_dp_mst_topology_mgr.base.lock.
 * @passthrough_aux: parent aux to which DSC pass-through requests should be
 * sent, only set if DSC pass-through is possible.
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t full_pbn;
	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from writing concurrently
	 * by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* AUX channel for i2c transport to the device on this port */
	struct drm_dp_aux *passthrough_aux;
	struct drm_dp_mst_branch *parent;

	struct drm_dp_vcpi vcpi;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	struct edid *cached_edid;
	/**
	 * @has_audio: Tracks whether the sink connector to this port is
	 * audio-capable.
	 */
	bool has_audio;

	/**
	 * @fec_capable: bool indicating if FEC can be supported up to that
	 * point in the MST topology.
	 */
	bool fec_capable;
};
169 :
/* sideband msg header - not bit struct (parsed form of the DP 1.2 MSG header) */
struct drm_dp_sideband_msg_hdr {
	u8 lct;			/* Link Count Total */
	u8 lcr;			/* Link Count Remaining */
	u8 rad[8];		/* Relative Address of the target device */
	bool broadcast;		/* broadcast message flag */
	bool path_msg;		/* path message flag */
	u8 msg_len;		/* sideband msg body length */
	bool somt;		/* Start Of Message Transaction */
	bool eomt;		/* End Of Message Transaction */
	bool seqno;		/* message sequence number (0 or 1) */
};

/* State for reassembling one sideband message from its 48-byte chunks */
struct drm_dp_sideband_msg_rx {
	u8 chunk[48];		/* chunk currently being received */
	u8 msg[256];		/* reassembled message body */
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;		/* seen the SOMT chunk of this message */
	bool have_eomt;		/* seen the EOMT chunk of this message */
	struct drm_dp_sideband_msg_hdr initial_hdr;	/* header of the first chunk */
};
194 :
/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: if a link address message has been sent to this device yet.
 * @guid: guid for DP 1.2 branch device. port under this branch can be
 * identified by port #.
 *
 * This structure represents an MST branch device, there is one
 * primary branch device at the root, along with any other branches connected
 * to downstream port of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	/**
	 * @destroy_next: linked-list entry used by
	 * drm_dp_delayed_destroy_work()
	 */
	struct list_head destroy_next;

	u8 rad[8];
	u8 lct;
	int num_ports;

	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 * There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from updating the list
	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct list_head ports;

	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};
262 :
263 :
/* NAK reply body, common to every sideband request type */
struct drm_dp_nak_reply {
	u8 guid[16];	/* GUID of the replying branch device */
	u8 reason;	/* NAK reason code */
	u8 nak_data;	/* additional reason-specific data */
};

/* Parsed LINK_ADDRESS ack reply: per-port description of a branch device */
struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;	/* number of valid entries in @ports */
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};
286 :
/* REMOTE_DPCD_READ ack reply */
struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;	/* number of valid bytes in @bytes */
	u8 bytes[255];
};

/* REMOTE_DPCD_WRITE ack reply */
struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

/* REMOTE_DPCD_WRITE nak reply */
struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

/* REMOTE_I2C_READ ack reply */
struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;	/* number of valid bytes in @bytes */
	u8 bytes[255];
};

/* REMOTE_I2C_READ nak reply */
struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

/* REMOTE_I2C_WRITE ack reply */
struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};
318 :
/* Parsed QUERY_STREAM_ENCRYPTION_STATUS ack reply */
struct drm_dp_query_stream_enc_status_ack_reply {
	/* Bit[23:16]- Stream Id */
	u8 stream_id;

	/* Bit[15]- Signed */
	bool reply_signed;

	/* Bit[10:8]- Stream Output Sink Type */
	bool unauthorizable_device_present;
	bool legacy_device_present;
	bool query_capable_device_present;

	/* Bit[12:11]- Stream Output CP Type */
	bool hdcp_1x_device_present;
	bool hdcp_2x_device_present;

	/* Bit[4]- Stream Authentication */
	bool auth_completed;

	/* Bit[3]- Stream Encryption */
	bool encryption_enabled;

	/* Bit[2]- Stream Repeater Function Present */
	bool repeater_present;

	/* Bit[1:0]- Stream State */
	u8 state;
};
347 :
#define DRM_DP_MAX_SDP_STREAMS 16
/* ALLOCATE_PAYLOAD request body */
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;	/* number of valid entries in @sdp_stream_sink */
	u8 vcpi;
	u16 pbn;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

/* ALLOCATE_PAYLOAD ack reply */
struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;
	u16 allocated_pbn;
};

/* CONNECTION_STATUS_NOTIFY up request body */
struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};
372 :
/* REMOTE_DPCD_READ request body */
struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

/* REMOTE_DPCD_WRITE request body */
struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;	/* caller-owned buffer of @num_bytes to write */
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
/* REMOTE_I2C_READ request body */
struct drm_dp_remote_i2c_read {
	u8 num_transactions;	/* number of valid entries in @transactions */
	u8 port_number;
	struct drm_dp_remote_i2c_read_tx {
		u8 i2c_dev_id;
		u8 num_bytes;
		u8 *bytes;	/* caller-owned buffer of @num_bytes */
		u8 no_stop_bit;
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	u8 num_bytes_read;
};

/* REMOTE_I2C_WRITE request body */
struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;
	u8 num_bytes;
	u8 *bytes;	/* caller-owned buffer of @num_bytes to write */
};

/* QUERY_STREAM_ENCRYPTION_STATUS request body */
struct drm_dp_query_stream_enc_status {
	u8 stream_id;
	u8 client_id[7]; /* 56-bit nonce */
	u8 stream_event;
	bool valid_stream_event;
	u8 stream_behavior;
	u8 valid_stream_behavior;
};
416 :
/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

/* ENUM_PATH_RESOURCES ack reply */
struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	bool fec_capable;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

/* QUERY_PAYLOAD request body */
struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

/* RESOURCE_STATUS_NOTIFY up request body */
struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

/* QUERY_PAYLOAD ack reply */
struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};
449 :
/* Sideband request body; the union member in use is selected by @req_type */
struct drm_dp_sideband_msg_req_body {
	u8 req_type;	/* DP_* sideband request type code */
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;

		struct drm_dp_query_stream_enc_status enc_status;
	} u;
};

/*
 * Sideband reply body; @reply_type distinguishes ack from nak, and
 * @req_type (the request being answered) selects the union member.
 */
struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;

		struct drm_dp_query_stream_enc_status_ack_reply enc_status;
	} u;
};
493 :
/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
/* timed out waiting for a response */
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

/* An in-flight sideband transmit */
struct drm_dp_sideband_msg_tx {
	u8 msg[256];		/* full message to transmit */
	u8 chunk[48];		/* scratch space for the chunk being sent */
	u8 cur_offset;		/* current offset into @msg */
	u8 cur_len;		/* total length of @msg */
	struct drm_dp_mst_branch *dst;	/* branch device the message is addressed to */
	struct list_head next;	/* entry on the manager's tx queue */
	int seqno;
	int state;		/* DRM_DP_SIDEBAND_TX_* */
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;	/* parsed reply */
};
516 :
/* sideband msg handler */
struct drm_dp_mst_topology_mgr;

/* Driver-supplied callbacks for the MST topology manager */
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	/*
	 * Checks for any pending MST interrupts, passing them to MST core for
	 * processing, the same way an HPD IRQ pulse handler would do this.
	 * If provided MST core calls this callback from a poll-waiting loop
	 * when waiting for MST down message replies. The driver is expected
	 * to guard against a race between this callback and the driver's HPD
	 * IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
532 :
/* one bit per payload in the manager's unsigned long payload/vcpi masks */
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

/* payload_state values for struct drm_dp_payload */
#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

/* One allocated payload: its VCPI and timeslot range */
struct drm_dp_payload {
	int payload_state;	/* DP_PAYLOAD_* */
	int start_slot;
	int num_slots;
	int vcpi;
};
545 :
#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

/* Atomic-state record of one port's VCPI allocation */
struct drm_dp_vcpi_allocation {
	struct drm_dp_mst_port *port;
	int vcpi;
	int pbn;
	bool dsc_enabled;
	struct list_head next;	/* entry in &drm_dp_mst_topology_state.vcpis */
};

/* Atomic private state for a topology manager */
struct drm_dp_mst_topology_state {
	struct drm_private_state base;
	struct list_head vcpis;		/* list of struct drm_dp_vcpi_allocation */
	struct drm_dp_mst_topology_mgr *mgr;
	u8 total_avail_slots;
	u8 start_slot;
};
563 :
#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the toplevel displayport MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @max_lane_count: maximum number of lanes the GPU can drive.
	 */
	int max_lane_count;
	/**
	 * @max_link_rate: maximum link rate per lane GPU can output, in kHz.
	 */
	int max_link_rate;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @down_rep_recv: Message receiver state for replies to down
	 * requests.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
	 * @payload_id_table_cleared.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: If this manager is enabled for an MST capable port. False
	 * if no MST sink/branch devices is connected.
	 */
	bool mst_state : 1;

	/**
	 * @payload_id_table_cleared: Whether or not we've cleared the payload
	 * ID table for @mst_primary. Protected by @lock.
	 */
	bool payload_id_table_cleared : 1;

	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;
	/**
	 * @pbn_div: PBN to slots divisor.
	 */
	int pbn_div;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests
	 */
	struct list_head tx_msg_downq;

	/**
	 * @payload_lock: Protect payload information.
	 */
	struct mutex payload_lock;
	/**
	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
	 * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
	 * this array is determined by @max_payloads.
	 */
	struct drm_dp_vcpi **proposed_vcpis;
	/**
	 * @payloads: Array of payloads. The size of this array is determined
	 * by @max_payloads.
	 */
	struct drm_dp_payload *payloads;
	/**
	 * @payload_mask: Elements of @payloads actually in use. Since
	 * reallocation of active outputs isn't possible gaps can be created by
	 * disabling outputs out of order compared to how they've been enabled.
	 */
	unsigned long payload_mask;
	/**
	 * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
	 */
	unsigned long vcpi_mask;

	/**
	 * @tx_waitq: Wait to queue stall for the tx worker.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of to be destroyed connectors.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of to be destroyed branch
	 * devices.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;

	/**
	 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
	 * A dedicated WQ makes it possible to drain any requeued work items
	 * on it.
	 */
	struct workqueue_struct *delayed_destroy_wq;

	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history_lock: protects
	 * &drm_dp_mst_port.topology_ref_history and
	 * &drm_dp_mst_branch.topology_ref_history.
	 */
	struct mutex topology_ref_history_lock;
#endif
};
777 :
778 : int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
779 : struct drm_device *dev, struct drm_dp_aux *aux,
780 : int max_dpcd_transaction_bytes,
781 : int max_payloads,
782 : int max_lane_count, int max_link_rate,
783 : int conn_base_id);
784 :
785 : void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
786 :
787 : bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
788 : int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
789 :
790 : int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
791 :
792 :
793 : int
794 : drm_dp_mst_detect_port(struct drm_connector *connector,
795 : struct drm_modeset_acquire_ctx *ctx,
796 : struct drm_dp_mst_topology_mgr *mgr,
797 : struct drm_dp_mst_port *port);
798 :
799 : struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
800 :
801 : int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
802 : int link_rate, int link_lane_count);
803 :
804 : int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
805 :
806 : bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
807 : struct drm_dp_mst_port *port, int pbn, int slots);
808 :
809 : int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
810 :
811 :
812 : void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
813 :
814 : void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
815 :
816 : void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
817 : struct drm_dp_mst_port *port);
818 :
819 :
820 : int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
821 : int pbn);
822 :
823 :
824 : int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot);
825 :
826 :
827 : int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
828 :
829 : int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
830 :
831 : void drm_dp_mst_dump_topology(struct seq_file *m,
832 : struct drm_dp_mst_topology_mgr *mgr);
833 :
834 : void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
835 : int __must_check
836 : drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
837 : bool sync);
838 :
839 : ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
840 : unsigned int offset, void *buffer, size_t size);
841 : ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
842 : unsigned int offset, void *buffer, size_t size);
843 :
844 : int drm_dp_mst_connector_late_register(struct drm_connector *connector,
845 : struct drm_dp_mst_port *port);
846 : void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
847 : struct drm_dp_mst_port *port);
848 :
849 : struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
850 : struct drm_dp_mst_topology_mgr *mgr);
851 : int __must_check
852 : drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
853 : struct drm_dp_mst_topology_mgr *mgr,
854 : struct drm_dp_mst_port *port, int pbn,
855 : int pbn_div);
856 : int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
857 : struct drm_dp_mst_port *port,
858 : int pbn, int pbn_div,
859 : bool enable);
860 : int __must_check
861 : drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
862 : struct drm_dp_mst_topology_mgr *mgr);
863 : int __must_check
864 : drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
865 : struct drm_dp_mst_topology_mgr *mgr,
866 : struct drm_dp_mst_port *port);
867 : int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
868 : struct drm_dp_mst_port *port, bool power_up);
869 : int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
870 : struct drm_dp_mst_port *port,
871 : struct drm_dp_query_stream_enc_status_ack_reply *status);
872 : int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
873 :
874 : void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
875 : void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
876 :
877 : struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
878 :
879 : extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
880 :
881 : /**
882 : * __drm_dp_mst_state_iter_get - private atomic state iterator function for
883 : * macro-internal use
884 : * @state: &struct drm_atomic_state pointer
885 : * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
886 : * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
887 : * iteration cursor
888 : * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
889 : * iteration cursor
890 : * @i: int iteration cursor, for macro-internal use
891 : *
892 : * Used by for_each_oldnew_mst_mgr_in_state(),
893 : * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
894 : * call this directly.
895 : *
896 : * Returns:
897 : * True if the current &struct drm_private_obj is a &struct
898 : * drm_dp_mst_topology_mgr, false otherwise.
899 : */
900 : static inline bool
901 : __drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
902 : struct drm_dp_mst_topology_mgr **mgr,
903 : struct drm_dp_mst_topology_state **old_state,
904 : struct drm_dp_mst_topology_state **new_state,
905 : int i)
906 : {
907 0 : struct __drm_private_objs_state *objs_state = &state->private_objs[i];
908 :
909 0 : if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
910 : return false;
911 :
912 0 : *mgr = to_dp_mst_topology_mgr(objs_state->ptr);
913 : if (old_state)
914 : *old_state = to_dp_mst_topology_state(objs_state->old_state);
915 : if (new_state)
916 0 : *new_state = to_dp_mst_topology_state(objs_state->new_state);
917 :
918 : return true;
919 : }
920 :
/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 * Private objects that are not MST topology managers are skipped.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
939 :
/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 * Private objects that are not MST topology managers are skipped.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
956 :
/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 * Private objects that are not MST topology managers are skipped.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))
974 :
975 : #endif
|