Line data Source code
1 : /*
2 : * Copyright 2015 Advanced Micro Devices, Inc.
3 : *
4 : * Permission is hereby granted, free of charge, to any person obtaining a
5 : * copy of this software and associated documentation files (the "Software"),
6 : * to deal in the Software without restriction, including without limitation
7 : * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 : * and/or sell copies of the Software, and to permit persons to whom the
9 : * Software is furnished to do so, subject to the following conditions:
10 : *
11 : * The above copyright notice and this permission notice shall be included in
12 : * all copies or substantial portions of the Software.
13 : *
14 : * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 : * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 : * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 : * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 : * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 : * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 : * OTHER DEALINGS IN THE SOFTWARE.
21 : *
22 : * Authors: AMD
23 : */
24 :
25 : #include "dm_services.h"
26 :
27 : #include "dc.h"
28 :
29 : #include "core_status.h"
30 : #include "core_types.h"
31 : #include "hw_sequencer.h"
32 : #include "dce/dce_hwseq.h"
33 :
34 : #include "resource.h"
35 :
36 : #include "clk_mgr.h"
37 : #include "clock_source.h"
38 : #include "dc_bios_types.h"
39 :
40 : #include "bios_parser_interface.h"
41 : #include "bios/bios_parser_helper.h"
42 : #include "include/irq_service_interface.h"
43 : #include "transform.h"
44 : #include "dmcu.h"
45 : #include "dpp.h"
46 : #include "timing_generator.h"
47 : #include "abm.h"
48 : #include "virtual/virtual_link_encoder.h"
49 : #include "hubp.h"
50 :
51 : #include "link_hwss.h"
52 : #include "link_encoder.h"
53 : #include "link_enc_cfg.h"
54 :
55 : #include "dc_link.h"
56 : #include "dc_link_ddc.h"
57 : #include "dm_helpers.h"
58 : #include "mem_input.h"
59 :
60 : #include "dc_link_dp.h"
61 : #include "dc_dmub_srv.h"
62 :
63 : #include "dsc.h"
64 :
65 : #include "vm_helper.h"
66 :
67 : #include "dce/dce_i2c.h"
68 :
69 : #include "dmub/dmub_srv.h"
70 :
71 : #include "i2caux_interface.h"
72 :
73 : #include "dce/dmub_psr.h"
74 :
75 : #include "dce/dmub_hw_lock_mgr.h"
76 :
77 : #include "dc_trace.h"
78 :
79 : #include "dce/dmub_outbox.h"
80 :
81 : #define CTX \
82 : dc->ctx
83 :
84 : #define DC_LOGGER \
85 : dc->ctx->logger
86 :
87 : static const char DC_BUILD_ID[] = "production-build";
88 :
89 : /**
90 : * DOC: Overview
91 : *
92 : * DC is the OS-agnostic component of the amdgpu DC driver.
93 : *
94 : * DC maintains and validates a set of structs representing the state of the
95 : * driver and writes that state to AMD hardware.
96 : *
97 : * Main DC HW structs:
98 : *
99 : * struct dc - The central struct. One per driver. Created on driver load,
100 : * destroyed on driver unload.
101 : *
102 : * struct dc_context - One per driver.
103 : * Used as a backpointer by most other structs in dc.
104 : *
105 : * struct dc_link - One per connector (a physical DP, HDMI, miniDP, or eDP
106 : * plugpoint). Created on driver load, destroyed on driver unload.
107 : *
108 : * struct dc_sink - One per display. Created on boot or hotplug.
109 : * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
110 : * (the display directly attached). It may also have one or more remote
111 : * sinks (in the Multi-Stream Transport case).
112 : *
113 : * struct resource_pool - One per driver. Represents the hw blocks not in the
114 : * main pipeline. Not directly accessible by dm.
115 : *
116 : * Main dc state structs:
117 : *
118 : * These structs can be created and destroyed as needed. There is a full set of
119 : * these structs in dc->current_state representing the currently programmed state.
120 : *
121 : * struct dc_state - The global DC state to track global state information,
122 : * such as bandwidth values.
123 : *
124 : * struct dc_stream_state - Represents the hw configuration for the pipeline from
125 : * a framebuffer to a display. Maps one-to-one with dc_sink.
126 : *
127 : * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
128 : * and may have more in the Multi-Plane Overlay case.
129 : *
130 : * struct resource_context - Represents the programmable state of everything in
131 : * the resource_pool. Not directly accessible by dm.
132 : *
133 : * struct pipe_ctx - A member of struct resource_context. Represents the
134 : * internal hardware pipeline components. Each dc_plane_state has either
135 : * one or two (in the pipe-split case).
136 : */
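/*
 * Illustrative sketch (not part of the original file): how the structs
 * described above fit together when walking the committed state. The
 * fields used here (streams, stream_status, plane_states, link) are the
 * real ones referenced elsewhere in this file; the walker itself and its
 * name are hypothetical.
 */
static void example_walk_committed_state(struct dc *dc)
{
	struct dc_state *state = dc->current_state;
	int i, j;

	for (i = 0; i < state->stream_count; i++) {
		struct dc_stream_state *stream = state->streams[i];
		struct dc_stream_status *status = &state->stream_status[i];

		/* Each stream drives one sink reached through a dc_link. */
		(void)stream->link;

		/* A stream composes one or more planes (the MPO case). */
		for (j = 0; j < status->plane_count; j++)
			(void)status->plane_states[j];
	}
}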
137 :
138 : /*******************************************************************************
139 : * Private functions
140 : ******************************************************************************/
141 :
142 : static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
143 : {
144 0 : if (new > *original)
145 0 : *original = new;
146 : }
147 :
148 : static void destroy_links(struct dc *dc)
149 : {
150 : uint32_t i;
151 :
152 0 : for (i = 0; i < dc->link_count; i++) {
153 0 : if (NULL != dc->links[i])
154 0 : link_destroy(&dc->links[i]);
155 : }
156 : }
157 :
158 : static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
159 : {
160 : int i;
161 0 : uint32_t count = 0;
162 :
163 0 : for (i = 0; i < num_links; i++) {
164 0 : if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
165 0 : links[i]->is_internal_display)
166 0 : count++;
167 : }
168 :
169 : return count;
170 : }
171 :
172 : static int get_seamless_boot_stream_count(struct dc_state *ctx)
173 : {
174 : uint8_t i;
175 0 : uint8_t seamless_boot_stream_count = 0;
176 :
177 0 : for (i = 0; i < ctx->stream_count; i++)
178 0 : if (ctx->streams[i]->apply_seamless_boot_optimization)
179 0 : seamless_boot_stream_count++;
180 :
181 0 : return seamless_boot_stream_count;
182 : }
183 :
184 0 : static bool create_links(
185 : struct dc *dc,
186 : uint32_t num_virtual_links)
187 : {
188 : int i;
189 : int connectors_num;
190 0 : struct dc_bios *bios = dc->ctx->dc_bios;
191 :
192 0 : dc->link_count = 0;
193 :
194 0 : connectors_num = bios->funcs->get_connectors_number(bios);
195 :
196 0 : DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
197 :
198 0 : if (connectors_num > ENUM_ID_COUNT) {
199 0 : dm_error(
200 : "DC: Number of connectors %d exceeds maximum of %d!\n",
201 : connectors_num,
202 : ENUM_ID_COUNT);
203 0 : return false;
204 : }
205 :
206 0 : dm_output_to_console(
207 : "DC: %s: connectors_num: physical:%d, virtual:%d\n",
208 : __func__,
209 : connectors_num,
210 : num_virtual_links);
211 :
212 0 : for (i = 0; i < connectors_num; i++) {
213 0 : struct link_init_data link_init_params = {0};
214 : struct dc_link *link;
215 :
216 0 : DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
217 :
218 0 : link_init_params.ctx = dc->ctx;
219 : /* next BIOS object table connector */
220 0 : link_init_params.connector_index = i;
221 0 : link_init_params.link_index = dc->link_count;
222 0 : link_init_params.dc = dc;
223 0 : link = link_create(&link_init_params);
224 :
225 0 : if (link) {
226 0 : dc->links[dc->link_count] = link;
227 0 : link->dc = dc;
228 0 : ++dc->link_count;
229 : }
230 : }
231 :
232 0 : DC_LOG_DC("BIOS object table - end");
233 :
234 : /* Create a link for each usb4 dpia port */
235 0 : for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
236 0 : struct link_init_data link_init_params = {0};
237 : struct dc_link *link;
238 :
239 0 : link_init_params.ctx = dc->ctx;
240 0 : link_init_params.connector_index = i;
241 0 : link_init_params.link_index = dc->link_count;
242 0 : link_init_params.dc = dc;
243 0 : link_init_params.is_dpia_link = true;
244 :
245 0 : link = link_create(&link_init_params);
246 0 : if (link) {
247 0 : dc->links[dc->link_count] = link;
248 0 : link->dc = dc;
249 0 : ++dc->link_count;
250 : }
251 : }
252 :
253 0 : for (i = 0; i < num_virtual_links; i++) {
254 0 : struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
255 0 : struct encoder_init_data enc_init = {0};
256 :
257 0 : if (link == NULL) {
258 0 : BREAK_TO_DEBUGGER();
259 0 : goto failed_alloc;
260 : }
261 :
262 0 : link->link_index = dc->link_count;
263 0 : dc->links[dc->link_count] = link;
264 0 : dc->link_count++;
265 :
266 0 : link->ctx = dc->ctx;
267 0 : link->dc = dc;
268 0 : link->connector_signal = SIGNAL_TYPE_VIRTUAL;
269 0 : link->link_id.type = OBJECT_TYPE_CONNECTOR;
270 0 : link->link_id.id = CONNECTOR_ID_VIRTUAL;
271 0 : link->link_id.enum_id = ENUM_ID_1;
272 0 : link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
273 :
274 0 : if (!link->link_enc) {
275 0 : BREAK_TO_DEBUGGER();
276 0 : goto failed_alloc;
277 : }
278 :
279 0 : link->link_status.dpcd_caps = &link->dpcd_caps;
280 :
281 0 : enc_init.ctx = dc->ctx;
282 0 : enc_init.channel = CHANNEL_ID_UNKNOWN;
283 0 : enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
284 0 : enc_init.transmitter = TRANSMITTER_UNKNOWN;
285 0 : enc_init.connector = link->link_id;
286 0 : enc_init.encoder.type = OBJECT_TYPE_ENCODER;
287 0 : enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
288 0 : enc_init.encoder.enum_id = ENUM_ID_1;
289 0 : virtual_link_encoder_construct(link->link_enc, &enc_init);
290 : }
291 :
292 0 : dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
293 :
294 0 : return true;
295 :
296 : failed_alloc:
297 0 : return false;
298 : }
299 :
300 : /* Create additional DIG link encoder objects if fewer than the platform
301 : * supports were created during link construction. This can happen if the
302 : * number of physical connectors is less than the number of DIGs.
303 : */
304 0 : static bool create_link_encoders(struct dc *dc)
305 : {
306 0 : bool res = true;
307 0 : unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
308 0 : unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
309 : int i;
310 :
311 : /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
312 : * link encoders and physical display endpoints and does not require
313 : * additional link encoder objects.
314 : */
315 0 : if (num_usb4_dpia == 0)
316 : return res;
317 :
318 : /* Create as many link encoder objects as the platform supports. DPIA
319 : * endpoints can be programmably mapped to any DIG.
320 : */
321 0 : if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
322 0 : for (i = 0; i < num_dig_link_enc; i++) {
323 0 : struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
324 :
325 0 : if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
326 0 : link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
327 : (enum engine_id)(ENGINE_ID_DIGA + i));
328 0 : if (link_enc) {
329 0 : dc->res_pool->link_encoders[i] = link_enc;
330 0 : dc->res_pool->dig_link_enc_count++;
331 : } else {
332 : res = false;
333 : }
334 : }
335 : }
336 : }
337 :
338 : return res;
339 : }
340 :
341 : /* Destroy any additional DIG link encoder objects created by
342 : * create_link_encoders().
343 : * NB: Must only be called after destroy_links().
344 : */
345 0 : static void destroy_link_encoders(struct dc *dc)
346 : {
347 : unsigned int num_usb4_dpia;
348 : unsigned int num_dig_link_enc;
349 : int i;
350 :
351 0 : if (!dc->res_pool)
352 : return;
353 :
354 0 : num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
355 0 : num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
356 :
357 : /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
358 : * link encoders and physical display endpoints and does not require
359 : * additional link encoder objects.
360 : */
361 0 : if (num_usb4_dpia == 0)
362 : return;
363 :
364 0 : for (i = 0; i < num_dig_link_enc; i++) {
365 0 : struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
366 :
367 0 : if (link_enc) {
368 0 : link_enc->funcs->destroy(&link_enc);
369 0 : dc->res_pool->link_encoders[i] = NULL;
370 0 : dc->res_pool->dig_link_enc_count--;
371 : }
372 : }
373 : }
374 :
375 : static struct dc_perf_trace *dc_perf_trace_create(void)
376 : {
377 0 : return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
378 : }
379 :
380 : static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
381 : {
382 0 : kfree(*perf_trace);
383 0 : *perf_trace = NULL;
384 : }
385 :
386 : /**
387 : * dc_stream_adjust_vmin_vmax() - Look up the pipe context of dc_stream_state
388 : * and update the vertical_total_min and vertical_total_max used by DRR
389 : * (Dynamic Refresh Rate), a power-saving feature that reduces the panel
390 : * refresh rate while the on-screen content is static.
391 : *
392 : * @dc: dc reference
393 : * @stream: Initial dc stream state
394 : * @adjust: Updated parameters for vertical_total_min and vertical_total_max
395 : *
396 : * Return: true if a matching pipe context was found and programmed.
397 : */
398 0 : bool dc_stream_adjust_vmin_vmax(struct dc *dc,
399 : struct dc_stream_state *stream,
400 : struct dc_crtc_timing_adjust *adjust)
401 : {
402 : int i;
403 :
404 0 : stream->adjust.v_total_max = adjust->v_total_max;
405 0 : stream->adjust.v_total_mid = adjust->v_total_mid;
406 0 : stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
407 0 : stream->adjust.v_total_min = adjust->v_total_min;
408 :
409 0 : for (i = 0; i < MAX_PIPES; i++) {
410 0 : struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
411 :
412 0 : if (pipe->stream == stream && pipe->stream_res.tg) {
413 0 : dc->hwss.set_drr(&pipe,
414 : 1,
415 : *adjust);
416 :
417 0 : return true;
418 : }
419 : }
420 : return false;
421 : }
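/*
 * Illustrative sketch (not part of the original file): deriving a DRR
 * range from a stream's timing before calling the function above. Only
 * the relation refresh_rate = pixel_clock / (h_total * v_total) is
 * assumed; the helper and its min_hz/max_hz parameters are hypothetical,
 * and both rates are assumed non-zero.
 */
static void example_set_drr_range(struct dc *dc, struct dc_stream_state *stream,
				  unsigned int min_hz, unsigned int max_hz)
{
	struct dc_crtc_timing_adjust adjust = { 0 };
	unsigned long long pix_clk_hz =
		(unsigned long long)stream->timing.pix_clk_100hz * 100;
	unsigned long long h_total = stream->timing.h_total;

	/* A lower refresh rate stretches the frame, i.e. a larger VTOTAL. */
	adjust.v_total_max = div64_u64(pix_clk_hz, h_total * min_hz);
	adjust.v_total_min = div64_u64(pix_clk_hz, h_total * max_hz);

	dc_stream_adjust_vmin_vmax(dc, stream, &adjust);
}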
422 :
423 : /**
424 : * dc_stream_get_last_used_drr_vtotal() - Looks up the pipe context of
425 : * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate).
426 : *
427 : * @dc: dc reference
428 : * @stream: Initial dc stream state
429 : * @refresh_rate: Receives the last VTOTAL programmed by DRR
430 : */
437 0 : bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
438 : struct dc_stream_state *stream,
439 : uint32_t *refresh_rate)
440 : {
441 0 : bool status = false;
442 :
443 0 : int i = 0;
444 :
445 0 : for (i = 0; i < MAX_PIPES; i++) {
446 0 : struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
447 :
448 0 : if (pipe->stream == stream && pipe->stream_res.tg) {
449 : /* Only execute if a function pointer has been defined for
450 : * the DC version in question
451 : */
452 0 : if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
453 0 : pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
454 :
455 0 : status = true;
456 :
457 0 : break;
458 : }
459 : }
460 : }
461 :
462 0 : return status;
463 : }
464 :
465 0 : bool dc_stream_get_crtc_position(struct dc *dc,
466 : struct dc_stream_state **streams, int num_streams,
467 : unsigned int *v_pos, unsigned int *nom_v_pos)
468 : {
469 : /* TODO: Support multiple streams */
470 0 : const struct dc_stream_state *stream = streams[0];
471 : int i;
472 0 : bool ret = false;
473 : struct crtc_position position;
474 :
475 0 : for (i = 0; i < MAX_PIPES; i++) {
476 0 : struct pipe_ctx *pipe =
477 0 : &dc->current_state->res_ctx.pipe_ctx[i];
478 :
479 0 : if (pipe->stream == stream && pipe->stream_res.stream_enc) {
480 0 : dc->hwss.get_position(&pipe, 1, &position);
481 :
482 0 : *v_pos = position.vertical_count;
483 0 : *nom_v_pos = position.nominal_vcount;
484 0 : ret = true;
485 : }
486 : }
487 0 : return ret;
488 : }
489 :
490 : #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
491 : bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
492 : struct crc_params *crc_window)
493 : {
494 : int i;
495 : struct dmcu *dmcu = dc->res_pool->dmcu;
496 : struct pipe_ctx *pipe;
497 : struct crc_region tmp_win, *crc_win;
498 : struct otg_phy_mux mapping_tmp, *mux_mapping;
499 :
500 : /*crc window can't be null*/
501 : if (!crc_window)
502 : return false;
503 :
504 : if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
505 : crc_win = &tmp_win;
506 : mux_mapping = &mapping_tmp;
507 : /*set crc window*/
508 : tmp_win.x_start = crc_window->windowa_x_start;
509 : tmp_win.y_start = crc_window->windowa_y_start;
510 : tmp_win.x_end = crc_window->windowa_x_end;
511 : tmp_win.y_end = crc_window->windowa_y_end;
512 :
513 : for (i = 0; i < MAX_PIPES; i++) {
514 : pipe = &dc->current_state->res_ctx.pipe_ctx[i];
515 : if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
516 : break;
517 : }
518 :
519 : /* Stream not found */
520 : if (i == MAX_PIPES)
521 : return false;
522 :
523 :
524 : /*set mux routing info*/
525 : mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
526 : mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
527 :
528 : dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
529 : } else {
530 : DC_LOG_DC("dmcu is not initialized");
531 : return false;
532 : }
533 :
534 : return true;
535 : }
536 :
537 : bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
538 : {
539 : int i;
540 : struct dmcu *dmcu = dc->res_pool->dmcu;
541 : struct pipe_ctx *pipe;
542 : struct otg_phy_mux mapping_tmp, *mux_mapping;
543 :
544 : if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
545 : mux_mapping = &mapping_tmp;
546 :
547 : for (i = 0; i < MAX_PIPES; i++) {
548 : pipe = &dc->current_state->res_ctx.pipe_ctx[i];
549 : if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
550 : break;
551 : }
552 :
553 : /* Stream not found */
554 : if (i == MAX_PIPES)
555 : return false;
556 :
557 :
558 : /*set mux routing info*/
559 : mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
560 : mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
561 :
562 : dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
563 : } else {
564 : DC_LOG_DC("dmcu is not initialized");
565 : return false;
566 : }
567 :
568 : return true;
569 : }
570 : #endif
571 :
572 : /**
573 : * dc_stream_configure_crc() - Configure CRC capture for the given stream.
574 : * @dc: DC Object
575 : * @stream: The stream to configure CRC on.
576 : * @crc_window: CRC window (x/y start/end) information
577 : * @enable: Enable CRC if true, disable otherwise.
578 : * @continuous: Capture CRC on every frame if true. Otherwise, only capture
579 : * once.
580 : *
581 : * By default, only CRC0 is configured, and the entire frame is used to
582 : * calculate the CRC.
583 : */
584 0 : bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
585 : struct crc_params *crc_window, bool enable, bool continuous)
586 : {
587 : int i;
588 : struct pipe_ctx *pipe;
589 : struct crc_params param;
590 : struct timing_generator *tg;
591 :
592 0 : for (i = 0; i < MAX_PIPES; i++) {
593 0 : pipe = &dc->current_state->res_ctx.pipe_ctx[i];
594 0 : if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
595 : break;
596 : }
597 : /* Stream not found */
598 0 : if (i == MAX_PIPES)
599 : return false;
600 :
601 : /* By default, capture the full frame */
602 0 : param.windowa_x_start = 0;
603 0 : param.windowa_y_start = 0;
604 0 : param.windowa_x_end = pipe->stream->timing.h_addressable;
605 0 : param.windowa_y_end = pipe->stream->timing.v_addressable;
606 0 : param.windowb_x_start = 0;
607 0 : param.windowb_y_start = 0;
608 0 : param.windowb_x_end = pipe->stream->timing.h_addressable;
609 0 : param.windowb_y_end = pipe->stream->timing.v_addressable;
610 :
611 0 : if (crc_window) {
612 0 : param.windowa_x_start = crc_window->windowa_x_start;
613 0 : param.windowa_y_start = crc_window->windowa_y_start;
614 0 : param.windowa_x_end = crc_window->windowa_x_end;
615 0 : param.windowa_y_end = crc_window->windowa_y_end;
616 0 : param.windowb_x_start = crc_window->windowb_x_start;
617 0 : param.windowb_y_start = crc_window->windowb_y_start;
618 0 : param.windowb_x_end = crc_window->windowb_x_end;
619 0 : param.windowb_y_end = crc_window->windowb_y_end;
620 : }
621 :
622 0 : param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
623 0 : param.odm_mode = pipe->next_odm_pipe ? 1:0;
624 :
625 : /* Default to the union of both windows */
626 0 : param.selection = UNION_WINDOW_A_B;
627 0 : param.continuous_mode = continuous;
628 0 : param.enable = enable;
629 :
630 0 : tg = pipe->stream_res.tg;
631 :
632 : /* Only call if supported */
633 0 : if (tg->funcs->configure_crc)
634 0 : return tg->funcs->configure_crc(tg, &param);
635 0 : DC_LOG_WARNING("CRC capture not supported.");
636 0 : return false;
637 : }
638 :
639 : /**
640 : * dc_stream_get_crc() - Get CRC values for the given stream.
641 : *
642 : * @dc: DC object.
643 : * @stream: The DC stream state of the stream to get CRCs from.
644 : * @r_cr: CRC value for the red component.
645 : * @g_y: CRC value for the green component.
646 : * @b_cb: CRC value for the blue component.
647 : *
648 : * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
649 : *
650 : * Return:
651 : * false if stream is not found, or if CRCs are not enabled.
652 : */
653 0 : bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
654 : uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
655 : {
656 : int i;
657 : struct pipe_ctx *pipe;
658 : struct timing_generator *tg;
659 :
660 0 : for (i = 0; i < MAX_PIPES; i++) {
661 0 : pipe = &dc->current_state->res_ctx.pipe_ctx[i];
662 0 : if (pipe->stream == stream)
663 : break;
664 : }
665 : /* Stream not found */
666 0 : if (i == MAX_PIPES)
667 : return false;
668 :
669 0 : tg = pipe->stream_res.tg;
670 :
671 0 : if (tg->funcs->get_crc)
672 0 : return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
673 0 : DC_LOG_WARNING("CRC capture not supported.");
674 0 : return false;
675 : }
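/*
 * Illustrative sketch (not part of the original file): combining the two
 * CRC helpers above. A NULL crc_window selects the default full-frame
 * windows; the wrapper and its name are hypothetical, and it elides the
 * frame-completion wait a real caller would need before reading.
 */
static bool example_read_stream_crc(struct dc *dc, struct dc_stream_state *stream,
				    uint32_t crc[3])
{
	/* Enable continuous CRC capture over the whole frame. */
	if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
		return false;

	/* Values are valid only after at least one completed frame. */
	return dc_stream_get_crc(dc, stream, &crc[0], &crc[1], &crc[2]);
}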
676 :
677 0 : void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
678 : enum dc_dynamic_expansion option)
679 : {
680 : /* OPP FMT dyn expansion updates*/
681 : int i;
682 : struct pipe_ctx *pipe_ctx;
683 :
684 0 : for (i = 0; i < MAX_PIPES; i++) {
685 0 : if (dc->current_state->res_ctx.pipe_ctx[i].stream
686 : == stream) {
687 0 : pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
688 0 : pipe_ctx->stream_res.opp->dyn_expansion = option;
689 0 : pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
690 : pipe_ctx->stream_res.opp,
691 : COLOR_SPACE_YCBCR601,
692 : stream->timing.display_color_depth,
693 : stream->signal);
694 : }
695 : }
696 0 : }
697 :
698 0 : void dc_stream_set_dither_option(struct dc_stream_state *stream,
699 : enum dc_dither_option option)
700 : {
701 : struct bit_depth_reduction_params params;
702 0 : struct dc_link *link = stream->link;
703 0 : struct pipe_ctx *pipes = NULL;
704 : int i;
705 :
706 0 : for (i = 0; i < MAX_PIPES; i++) {
707 0 : if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
708 : stream) {
709 0 : pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
710 0 : break;
711 : }
712 : }
713 :
714 0 : if (!pipes)
715 0 : return;
716 0 : if (option > DITHER_OPTION_MAX)
717 : return;
718 :
719 0 : stream->dither_option = option;
720 :
721 0 : memset(&params, 0, sizeof(params));
722 0 : resource_build_bit_depth_reduction_params(stream, &params);
723 0 : stream->bit_depth_params = params;
724 :
725 0 : if (pipes->plane_res.xfm &&
726 0 : pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
727 0 : pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
728 : pipes->plane_res.xfm,
729 : pipes->plane_res.scl_data.lb_params.depth,
730 0 : &stream->bit_depth_params);
731 : }
732 :
733 0 : pipes->stream_res.opp->funcs->
734 : opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
735 : }
736 :
737 0 : bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
738 : {
739 : int i;
740 0 : bool ret = false;
741 : struct pipe_ctx *pipes;
742 :
743 0 : for (i = 0; i < MAX_PIPES; i++) {
744 0 : if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
745 0 : pipes = &dc->current_state->res_ctx.pipe_ctx[i];
746 0 : dc->hwss.program_gamut_remap(pipes);
747 0 : ret = true;
748 : }
749 : }
750 :
751 0 : return ret;
752 : }
753 :
754 0 : bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
755 : {
756 : int i;
757 0 : bool ret = false;
758 : struct pipe_ctx *pipes;
759 :
760 0 : for (i = 0; i < MAX_PIPES; i++) {
761 0 : if (dc->current_state->res_ctx.pipe_ctx[i].stream
762 : == stream) {
763 :
764 0 : pipes = &dc->current_state->res_ctx.pipe_ctx[i];
765 0 : dc->hwss.program_output_csc(dc,
766 : pipes,
767 : stream->output_color_space,
768 0 : stream->csc_color_matrix.matrix,
769 0 : pipes->stream_res.opp->inst);
770 0 : ret = true;
771 : }
772 : }
773 :
774 0 : return ret;
775 : }
776 :
777 0 : void dc_stream_set_static_screen_params(struct dc *dc,
778 : struct dc_stream_state **streams,
779 : int num_streams,
780 : const struct dc_static_screen_params *params)
781 : {
782 : int i, j;
783 : struct pipe_ctx *pipes_affected[MAX_PIPES];
784 0 : int num_pipes_affected = 0;
785 :
786 0 : for (i = 0; i < num_streams; i++) {
787 0 : struct dc_stream_state *stream = streams[i];
788 :
789 0 : for (j = 0; j < MAX_PIPES; j++) {
790 0 : if (dc->current_state->res_ctx.pipe_ctx[j].stream
791 : == stream) {
792 0 : pipes_affected[num_pipes_affected++] =
793 0 : &dc->current_state->res_ctx.pipe_ctx[j];
794 : }
795 : }
796 : }
797 :
798 0 : dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
799 0 : }
800 :
801 0 : static void dc_destruct(struct dc *dc)
802 : {
803 : // reset link encoder assignment table on destruct
804 0 : if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
805 0 : link_enc_cfg_init(dc, dc->current_state);
806 :
807 0 : if (dc->current_state) {
808 0 : dc_release_state(dc->current_state);
809 0 : dc->current_state = NULL;
810 : }
811 :
812 0 : destroy_links(dc);
813 :
814 0 : destroy_link_encoders(dc);
815 :
816 0 : if (dc->clk_mgr) {
817 0 : dc_destroy_clk_mgr(dc->clk_mgr);
818 0 : dc->clk_mgr = NULL;
819 : }
820 :
821 0 : dc_destroy_resource_pool(dc);
822 :
823 0 : if (dc->ctx->gpio_service)
824 0 : dal_gpio_service_destroy(&dc->ctx->gpio_service);
825 :
826 0 : if (dc->ctx->created_bios)
827 0 : dal_bios_parser_destroy(&dc->ctx->dc_bios);
828 :
829 0 : dc_perf_trace_destroy(&dc->ctx->perf_trace);
830 :
831 0 : kfree(dc->ctx);
832 0 : dc->ctx = NULL;
833 :
834 0 : kfree(dc->bw_vbios);
835 0 : dc->bw_vbios = NULL;
836 :
837 0 : kfree(dc->bw_dceip);
838 0 : dc->bw_dceip = NULL;
839 :
840 0 : kfree(dc->dcn_soc);
841 0 : dc->dcn_soc = NULL;
842 :
843 0 : kfree(dc->dcn_ip);
844 0 : dc->dcn_ip = NULL;
845 :
846 0 : kfree(dc->vm_helper);
847 0 : dc->vm_helper = NULL;
848 :
849 0 : }
850 :
851 0 : static bool dc_construct_ctx(struct dc *dc,
852 : const struct dc_init_data *init_params)
853 : {
854 : struct dc_context *dc_ctx;
855 0 : enum dce_version dc_version = DCE_VERSION_UNKNOWN;
856 :
857 0 : dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
858 0 : if (!dc_ctx)
859 : return false;
860 :
861 0 : dc_ctx->cgs_device = init_params->cgs_device;
862 0 : dc_ctx->driver_context = init_params->driver;
863 0 : dc_ctx->dc = dc;
864 0 : dc_ctx->asic_id = init_params->asic_id;
865 0 : dc_ctx->dc_sink_id_count = 0;
866 0 : dc_ctx->dc_stream_id_count = 0;
867 0 : dc_ctx->dce_environment = init_params->dce_environment;
868 0 : dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
869 0 : dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
870 :
871 : /* Create logger */
872 :
873 0 : dc_version = resource_parse_asic_id(init_params->asic_id);
874 0 : dc_ctx->dce_version = dc_version;
875 :
876 0 : dc_ctx->perf_trace = dc_perf_trace_create();
877 0 : if (!dc_ctx->perf_trace) {
878 0 : ASSERT_CRITICAL(false);
879 0 : return false;
880 : }
881 :
882 0 : dc->ctx = dc_ctx;
883 :
884 0 : return true;
885 : }
886 :
887 0 : static bool dc_construct(struct dc *dc,
888 : const struct dc_init_data *init_params)
889 : {
890 : struct dc_context *dc_ctx;
891 : struct bw_calcs_dceip *dc_dceip;
892 : struct bw_calcs_vbios *dc_vbios;
893 : struct dcn_soc_bounding_box *dcn_soc;
894 : struct dcn_ip_params *dcn_ip;
895 :
896 0 : dc->config = init_params->flags;
897 :
898 : // Allocate memory for the vm_helper
899 0 : dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
900 0 : if (!dc->vm_helper) {
901 0 : dm_error("%s: failed to create dc->vm_helper\n", __func__);
902 0 : goto fail;
903 : }
904 :
905 0 : memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
906 :
907 0 : dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
908 0 : if (!dc_dceip) {
909 0 : dm_error("%s: failed to create dceip\n", __func__);
910 0 : goto fail;
911 : }
912 :
913 0 : dc->bw_dceip = dc_dceip;
914 :
915 0 : dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
916 0 : if (!dc_vbios) {
917 0 : dm_error("%s: failed to create vbios\n", __func__);
918 0 : goto fail;
919 : }
920 :
921 0 : dc->bw_vbios = dc_vbios;
922 0 : dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
923 0 : if (!dcn_soc) {
924 0 : dm_error("%s: failed to create dcn_soc\n", __func__);
925 0 : goto fail;
926 : }
927 :
928 0 : dc->dcn_soc = dcn_soc;
929 :
930 0 : dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
931 0 : if (!dcn_ip) {
932 0 : dm_error("%s: failed to create dcn_ip\n", __func__);
933 0 : goto fail;
934 : }
935 :
936 0 : dc->dcn_ip = dcn_ip;
937 :
938 0 : if (!dc_construct_ctx(dc, init_params)) {
939 0 : dm_error("%s: failed to create ctx\n", __func__);
940 0 : goto fail;
941 : }
942 :
943 0 : dc_ctx = dc->ctx;
944 :
945 : /* Resource should construct all asic specific resources.
946 : * This should be the only place where we need to parse the asic id
947 : */
948 0 : if (init_params->vbios_override)
949 0 : dc_ctx->dc_bios = init_params->vbios_override;
950 : else {
951 : /* Create BIOS parser */
952 : struct bp_init_data bp_init_data;
953 :
954 0 : bp_init_data.ctx = dc_ctx;
955 0 : bp_init_data.bios = init_params->asic_id.atombios_base_address;
956 :
957 0 : dc_ctx->dc_bios = dal_bios_parser_create(
958 : &bp_init_data, dc_ctx->dce_version);
959 :
960 0 : if (!dc_ctx->dc_bios) {
961 0 : ASSERT_CRITICAL(false);
962 0 : goto fail;
963 : }
964 :
965 0 : dc_ctx->created_bios = true;
966 : }
967 :
968 0 : dc->vendor_signature = init_params->vendor_signature;
969 :
970 : /* Create GPIO service */
971 0 : dc_ctx->gpio_service = dal_gpio_service_create(
972 : dc_ctx->dce_version,
973 : dc_ctx->dce_environment,
974 : dc_ctx);
975 :
976 0 : if (!dc_ctx->gpio_service) {
977 0 : ASSERT_CRITICAL(false);
978 0 : goto fail;
979 : }
980 :
981 0 : dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
982 0 : if (!dc->res_pool)
983 : goto fail;
984 :
985 : /* set i2c speed if not done by the respective dcnxxx__resource.c */
986 0 : if (dc->caps.i2c_speed_in_khz_hdcp == 0)
987 0 : dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
988 :
989 0 : dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
990 0 : if (!dc->clk_mgr)
991 : goto fail;
992 : #ifdef CONFIG_DRM_AMD_DC_DCN
993 0 : dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
994 :
995 0 : if (dc->res_pool->funcs->update_bw_bounding_box) {
996 0 : DC_FP_START();
997 0 : dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
998 0 : DC_FP_END();
999 : }
1000 : #endif
1001 :
1002 : /* Creation of current_state must occur after dc->dml
1003 : * is initialized in dc_create_resource_pool because
1004 : * on creation it copies the contents of dc->dml
1005 : */
1006 :
1007 0 : dc->current_state = dc_create_state(dc);
1008 :
1009 0 : if (!dc->current_state) {
1010 0 : dm_error("%s: failed to create validate ctx\n", __func__);
1011 0 : goto fail;
1012 : }
1013 :
1014 0 : if (!create_links(dc, init_params->num_virtual_links))
1015 : goto fail;
1016 :
1017 : /* Create additional DIG link encoder objects if fewer than the platform
1018 : * supports were created during link construction.
1019 : */
1020 0 : if (!create_link_encoders(dc))
1021 : goto fail;
1022 :
1023 0 : dc_resource_state_construct(dc, dc->current_state);
1024 :
1025 0 : return true;
1026 :
1027 : fail:
1028 : return false;
1029 : }
1030 :
1031 : static void disable_all_writeback_pipes_for_stream(
1032 : const struct dc *dc,
1033 : struct dc_stream_state *stream,
1034 : struct dc_state *context)
1035 : {
1036 : int i;
1037 :
1038 0 : for (i = 0; i < stream->num_wb_info; i++)
1039 0 : stream->writeback_info[i].wb_enabled = false;
1040 : }
1041 :
1042 0 : static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
1043 : struct dc_stream_state *stream, bool lock)
1044 : {
1045 : int i;
1046 :
1047 : /* Use the interdependent update lock if the hwseq provides one; otherwise fall back to per-pipe locking (the DCE110 case) */
1048 0 : if (dc->hwss.interdependent_update_lock)
1049 0 : dc->hwss.interdependent_update_lock(dc, context, lock);
1050 : else {
1051 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
1052 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1053 0 : struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1054 :
1055 : // Copied conditions that were previously in dce110_apply_ctx_for_surface
1056 0 : if (stream == pipe_ctx->stream) {
1057 0 : if (!pipe_ctx->top_pipe &&
1058 0 : (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
1059 0 : dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
1060 : }
1061 : }
1062 : }
1063 0 : }
1064 :
1065 0 : static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
1066 : {
1067 : int i, j;
1068 0 : struct dc_state *dangling_context = dc_create_state(dc);
1069 : struct dc_state *current_ctx;
1070 :
1071 0 : if (dangling_context == NULL)
1072 : return;
1073 :
1074 0 : dc_resource_state_copy_construct(dc->current_state, dangling_context);
1075 :
1076 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
1077 0 : struct dc_stream_state *old_stream =
1078 0 : dc->current_state->res_ctx.pipe_ctx[i].stream;
1079 0 : bool should_disable = true;
1080 0 : bool pipe_split_change = false;
1081 :
1082 0 : if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
1083 0 : (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
1084 0 : pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
1085 0 : dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
1086 : else
1087 0 : pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
1088 0 : dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
1089 :
1090 0 : for (j = 0; j < context->stream_count; j++) {
1091 0 : if (old_stream == context->streams[j]) {
1092 : should_disable = false;
1093 : break;
1094 : }
1095 : }
1096 0 : if (!should_disable && pipe_split_change &&
1097 0 : dc->current_state->stream_count != context->stream_count)
1098 0 : should_disable = true;
1099 :
1100 0 : if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
1101 0 : !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
1102 : struct pipe_ctx *old_pipe, *new_pipe;
1103 :
1104 0 : old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1105 0 : new_pipe = &context->res_ctx.pipe_ctx[i];
1106 :
1107 0 : if (old_pipe->plane_state && !new_pipe->plane_state)
1108 0 : should_disable = true;
1109 : }
1110 :
1111 0 : if (should_disable && old_stream) {
1112 0 : dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
1113 0 : disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
1114 :
1115 0 : if (dc->hwss.apply_ctx_for_surface) {
1116 0 : apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
1117 0 : dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
1118 0 : apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
1119 0 : dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1120 : }
1121 0 : if (dc->hwss.program_front_end_for_ctx) {
1122 0 : dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
1123 0 : dc->hwss.program_front_end_for_ctx(dc, dangling_context);
1124 0 : dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
1125 0 : dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1126 : }
1127 : }
1128 : }
1129 :
1130 0 : current_ctx = dc->current_state;
1131 0 : dc->current_state = dangling_context;
1132 : dc_release_state(current_ctx);
1133 : }
1134 :
1135 0 : static void disable_vbios_mode_if_required(
1136 : struct dc *dc,
1137 : struct dc_state *context)
1138 : {
1139 : unsigned int i, j;
1140 :
1141 : /* If the timing has changed, disable the stream */
1142 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
1143 0 : struct dc_stream_state *stream = NULL;
1144 0 : struct dc_link *link = NULL;
1145 0 : struct pipe_ctx *pipe = NULL;
1146 :
1147 0 : pipe = &context->res_ctx.pipe_ctx[i];
1148 0 : stream = pipe->stream;
1149 0 : if (stream == NULL)
1150 0 : continue;
1151 :
1152 : // only looking for first odm pipe
1153 0 : if (pipe->prev_odm_pipe)
1154 0 : continue;
1155 :
1156 0 : if (stream->link->local_sink &&
1157 0 : stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1158 0 : link = stream->link;
1159 : }
1160 :
1161 0 : if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1162 0 : unsigned int enc_inst, tg_inst = 0;
1163 : unsigned int pix_clk_100hz;
1164 :
1165 0 : enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1166 0 : if (enc_inst != ENGINE_ID_UNKNOWN) {
1167 0 : for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1168 0 : if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1169 0 : tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1170 : dc->res_pool->stream_enc[j]);
1171 0 : break;
1172 : }
1173 : }
1174 :
1175 0 : dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1176 : dc->res_pool->dp_clock_source,
1177 : tg_inst, &pix_clk_100hz);
1178 :
1179 0 : if (link->link_status.link_active) {
1180 0 : uint32_t requested_pix_clk_100hz =
1181 : pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1182 :
1183 0 : if (pix_clk_100hz != requested_pix_clk_100hz) {
1184 0 : if (dc->hwss.update_phy_state)
1185 0 : dc->hwss.update_phy_state(dc->current_state,
1186 : pipe, TX_OFF_SYMCLK_OFF);
1187 : else
1188 0 : core_link_disable_stream(pipe);
1189 0 : pipe->stream->dpms_off = false;
1190 : }
1191 : }
1192 : }
1193 : }
1194 : }
1195 0 : }
1196 :
1197 0 : static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1198 : {
1199 : int i;
1200 0 : PERF_TRACE();
1201 0 : for (i = 0; i < MAX_PIPES; i++) {
1202 0 : int count = 0;
1203 0 : struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1204 :
1205 0 : if (!pipe->plane_state)
1206 0 : continue;
1207 :
1208 : /* Timeout 100 ms */
1209 0 : while (count < 100000) {
1210 : /* Must set to false to start with, due to OR in update function */
1211 0 : pipe->plane_state->status.is_flip_pending = false;
1212 0 : dc->hwss.update_pending_status(pipe);
1213 0 : if (!pipe->plane_state->status.is_flip_pending)
1214 : break;
1215 0 : udelay(1);
1216 0 : count++;
1217 : }
1218 0 : ASSERT(!pipe->plane_state->status.is_flip_pending);
1219 : }
1220 0 : PERF_TRACE();
1221 0 : }
1222 :
1223 : /*******************************************************************************
1224 : * Public functions
1225 : ******************************************************************************/
1226 :
1227 0 : struct dc *dc_create(const struct dc_init_data *init_params)
1228 : {
1229 0 : struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1230 : unsigned int full_pipe_count;
1231 :
1232 0 : if (!dc)
1233 : return NULL;
1234 :
1235 0 : if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1236 0 : if (!dc_construct_ctx(dc, init_params))
1237 : goto destruct_dc;
1238 : } else {
1239 0 : if (!dc_construct(dc, init_params))
1240 : goto destruct_dc;
1241 :
1242 0 : full_pipe_count = dc->res_pool->pipe_count;
1243 0 : if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1244 0 : full_pipe_count--;
1245 0 : dc->caps.max_streams = min(
1246 : full_pipe_count,
1247 : dc->res_pool->stream_enc_count);
1248 :
1249 0 : dc->caps.max_links = dc->link_count;
1250 0 : dc->caps.max_audios = dc->res_pool->audio_count;
1251 0 : dc->caps.linear_pitch_alignment = 64;
1252 :
1253 0 : dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1254 :
1255 0 : dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1256 :
1257 0 : if (dc->res_pool->dmcu != NULL)
1258 0 : dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1259 : }
1260 :
1261 0 : dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1262 0 : dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1263 :
1264 : /* Populate versioning information */
1265 0 : dc->versions.dc_ver = DC_VER;
1266 :
1267 0 : dc->build_id = DC_BUILD_ID;
1268 :
1269 0 : DC_LOG_DC("Display Core initialized\n");
1270 :
1271 :
1272 :
1273 0 : return dc;
1274 :
1275 : destruct_dc:
1276 0 : dc_destruct(dc);
1277 0 : kfree(dc);
1278 0 : return NULL;
1279 : }
1280 :
1281 0 : static void detect_edp_presence(struct dc *dc)
1282 : {
1283 : struct dc_link *edp_links[MAX_NUM_EDP];
1284 0 : struct dc_link *edp_link = NULL;
1285 : enum dc_connection_type type;
1286 : int i;
1287 : int edp_num;
1288 :
1289 0 : get_edp_links(dc, edp_links, &edp_num);
1290 0 : if (!edp_num)
1291 0 : return;
1292 :
1293 0 : for (i = 0; i < edp_num; i++) {
1294 0 : edp_link = edp_links[i];
1295 0 : if (dc->config.edp_not_connected) {
1296 0 : edp_link->edp_sink_present = false;
1297 : } else {
1298 0 : dc_link_detect_sink(edp_link, &type);
1299 0 : edp_link->edp_sink_present = (type != dc_connection_none);
1300 : }
1301 : }
1302 : }
1303 :
1304 0 : void dc_hardware_init(struct dc *dc)
1305 : {
1306 :
1307 0 : detect_edp_presence(dc);
1308 0 : if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1309 0 : dc->hwss.init_hw(dc);
1310 0 : }
1311 :
1312 0 : void dc_init_callbacks(struct dc *dc,
1313 : const struct dc_callback_init *init_params)
1314 : {
1315 : #ifdef CONFIG_DRM_AMD_DC_HDCP
1316 : dc->ctx->cp_psp = init_params->cp_psp;
1317 : #endif
1318 0 : }
1319 :
1320 0 : void dc_deinit_callbacks(struct dc *dc)
1321 : {
1322 : #ifdef CONFIG_DRM_AMD_DC_HDCP
1323 : memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1324 : #endif
1325 0 : }
1326 :
1327 0 : void dc_destroy(struct dc **dc)
1328 : {
1329 0 : dc_destruct(*dc);
1330 0 : kfree(*dc);
1331 0 : *dc = NULL;
1332 0 : }
1333 :
1334 0 : static void enable_timing_multisync(
1335 : struct dc *dc,
1336 : struct dc_state *ctx)
1337 : {
1338 0 : int i, multisync_count = 0;
1339 0 : int pipe_count = dc->res_pool->pipe_count;
1340 0 : struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1341 :
1342 0 : for (i = 0; i < pipe_count; i++) {
1343 0 : if (!ctx->res_ctx.pipe_ctx[i].stream ||
1344 0 : !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1345 0 : continue;
1346 0 : if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1347 0 : continue;
1348 0 : multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1349 0 : multisync_count++;
1350 : }
1351 :
1352 0 : if (multisync_count > 0) {
1353 0 : dc->hwss.enable_per_frame_crtc_position_reset(
1354 : dc, multisync_count, multisync_pipes);
1355 : }
1356 0 : }
1357 :
1358 0 : static void program_timing_sync(
1359 : struct dc *dc,
1360 : struct dc_state *ctx)
1361 : {
1362 : int i, j, k;
1363 0 : int group_index = 0;
1364 0 : int num_group = 0;
1365 0 : int pipe_count = dc->res_pool->pipe_count;
1366 0 : struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1367 :
1368 0 : for (i = 0; i < pipe_count; i++) {
1369 0 : if (!ctx->res_ctx.pipe_ctx[i].stream
1370 0 : || ctx->res_ctx.pipe_ctx[i].top_pipe
1371 0 : || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1372 0 : continue;
1373 :
1374 0 : unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1375 : }
1376 :
1377 0 : for (i = 0; i < pipe_count; i++) {
1378 0 : int group_size = 1;
1379 0 : enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1380 : struct pipe_ctx *pipe_set[MAX_PIPES];
1381 :
1382 0 : if (!unsynced_pipes[i])
1383 0 : continue;
1384 :
1385 0 : pipe_set[0] = unsynced_pipes[i];
1386 0 : unsynced_pipes[i] = NULL;
1387 :
1388 : /* Add tg to the set, search rest of the tg's for ones with
1389 : * same timing, add all tgs with same timing to the group
1390 : */
1391 0 : for (j = i + 1; j < pipe_count; j++) {
1392 0 : if (!unsynced_pipes[j])
1393 0 : continue;
1394 0 : if (sync_type != TIMING_SYNCHRONIZABLE &&
1395 0 : dc->hwss.enable_vblanks_synchronization &&
1396 0 : unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1397 0 : resource_are_vblanks_synchronizable(
1398 : unsynced_pipes[j]->stream,
1399 0 : pipe_set[0]->stream)) {
1400 0 : sync_type = VBLANK_SYNCHRONIZABLE;
1401 0 : pipe_set[group_size] = unsynced_pipes[j];
1402 0 : unsynced_pipes[j] = NULL;
1403 0 : group_size++;
1404 : } else
1405 0 : if (sync_type != VBLANK_SYNCHRONIZABLE &&
1406 0 : resource_are_streams_timing_synchronizable(
1407 : unsynced_pipes[j]->stream,
1408 0 : pipe_set[0]->stream)) {
1409 0 : sync_type = TIMING_SYNCHRONIZABLE;
1410 0 : pipe_set[group_size] = unsynced_pipes[j];
1411 0 : unsynced_pipes[j] = NULL;
1412 0 : group_size++;
1413 : }
1414 : }
1415 :
1416 : /* set first unblanked pipe as master */
1417 0 : for (j = 0; j < group_size; j++) {
1418 : bool is_blanked;
1419 :
1420 0 : if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1421 0 : is_blanked =
1422 : pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1423 : else
1424 0 : is_blanked =
1425 0 : pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1426 0 : if (!is_blanked) {
1427 0 : if (j == 0)
1428 : break;
1429 :
1430 0 : swap(pipe_set[0], pipe_set[j]);
1431 0 : break;
1432 : }
1433 : }
1434 :
1435 0 : for (k = 0; k < group_size; k++) {
1436 0 : struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1437 :
1438 0 : status->timing_sync_info.group_id = num_group;
1439 0 : status->timing_sync_info.group_size = group_size;
1440 0 : if (k == 0)
1441 0 : status->timing_sync_info.master = true;
1442 : else
1443 0 : status->timing_sync_info.master = false;
1444 :
1445 : }
1446 :
1447 : /* remove any other pipes that have already been synced */
1448 0 : if (dc->config.use_pipe_ctx_sync_logic) {
1449 : /* check each pipe's syncd value to decide which pipes to remove */
1450 0 : for (j = 1; j < group_size; j++) {
1451 0 : if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1452 0 : group_size--;
1453 0 : pipe_set[j] = pipe_set[group_size];
1454 0 : j--;
1455 : } else
1456 : /* link slave pipe's syncd with master pipe */
1457 0 : pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1458 : }
1459 : } else {
1460 0 : for (j = j + 1; j < group_size; j++) {
1461 : bool is_blanked;
1462 :
1463 0 : if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1464 0 : is_blanked =
1465 : pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1466 : else
1467 0 : is_blanked =
1468 0 : pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1469 0 : if (!is_blanked) {
1470 0 : group_size--;
1471 0 : pipe_set[j] = pipe_set[group_size];
1472 0 : j--;
1473 : }
1474 : }
1475 : }
1476 :
1477 0 : if (group_size > 1) {
1478 0 : if (sync_type == TIMING_SYNCHRONIZABLE) {
1479 0 : dc->hwss.enable_timing_synchronization(
1480 : dc, group_index, group_size, pipe_set);
1481 : } else
1482 0 : if (sync_type == VBLANK_SYNCHRONIZABLE) {
1483 0 : dc->hwss.enable_vblanks_synchronization(
1484 : dc, group_index, group_size, pipe_set);
1485 : }
1486 0 : group_index++;
1487 : }
1488 0 : num_group++;
1489 : }
1490 0 : }
1491 :
1492 : static bool context_changed(
1493 : struct dc *dc,
1494 : struct dc_state *context)
1495 : {
1496 : uint8_t i;
1497 :
1498 0 : if (context->stream_count != dc->current_state->stream_count)
1499 : return true;
1500 :
1501 0 : for (i = 0; i < dc->current_state->stream_count; i++) {
1502 0 : if (dc->current_state->streams[i] != context->streams[i])
1503 : return true;
1504 : }
1505 :
1506 : return false;
1507 : }
1508 :
1509 0 : bool dc_validate_boot_timing(const struct dc *dc,
1510 : const struct dc_sink *sink,
1511 : struct dc_crtc_timing *crtc_timing)
1512 : {
1513 : struct timing_generator *tg;
1514 0 : struct stream_encoder *se = NULL;
1515 :
1516 0 : struct dc_crtc_timing hw_crtc_timing = {0};
1517 :
1518 0 : struct dc_link *link = sink->link;
1519 0 : unsigned int i, enc_inst, tg_inst = 0;
1520 :
1521 : /* Support seamless boot on EDP displays only */
1522 0 : if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1523 : return false;
1524 : }
1525 :
1526 : /* Check for enabled DIG to identify enabled display */
1527 0 : if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1528 : return false;
1529 :
1530 0 : enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1531 :
1532 0 : if (enc_inst == ENGINE_ID_UNKNOWN)
1533 : return false;
1534 :
1535 0 : for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1536 0 : if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1537 :
1538 0 : se = dc->res_pool->stream_enc[i];
1539 :
1540 0 : tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1541 : dc->res_pool->stream_enc[i]);
1542 0 : break;
1543 : }
1544 : }
1545 :
1546 : // tg_inst not found
1547 0 : if (i == dc->res_pool->stream_enc_count)
1548 : return false;
1549 :
1550 0 : if (tg_inst >= dc->res_pool->timing_generator_count)
1551 : return false;
1552 :
1553 0 : tg = dc->res_pool->timing_generators[tg_inst];
1554 :
1555 0 : if (!tg->funcs->get_hw_timing)
1556 : return false;
1557 :
1558 0 : if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1559 : return false;
1560 :
1561 0 : if (crtc_timing->h_total != hw_crtc_timing.h_total)
1562 : return false;
1563 :
1564 0 : if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1565 : return false;
1566 :
1567 0 : if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1568 : return false;
1569 :
1570 0 : if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1571 : return false;
1572 :
1573 0 : if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1574 : return false;
1575 :
1576 0 : if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1577 : return false;
1578 :
1579 0 : if (crtc_timing->v_total != hw_crtc_timing.v_total)
1580 : return false;
1581 :
1582 0 : if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1583 : return false;
1584 :
1585 0 : if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1586 : return false;
1587 :
1588 0 : if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1589 : return false;
1590 :
1591 0 : if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1592 : return false;
1593 :
1594 0 : if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1595 : return false;
1596 :
1597 : /* block DSC for now, as VBIOS does not currently support DSC timings */
1598 0 : if (crtc_timing->flags.DSC)
1599 : return false;
1600 :
1601 0 : if (dc_is_dp_signal(link->connector_signal)) {
1602 : unsigned int pix_clk_100hz;
1603 0 : uint32_t numOdmPipes = 1;
1604 0 : uint32_t id_src[4] = {0};
1605 :
1606 0 : dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1607 : dc->res_pool->dp_clock_source,
1608 : tg_inst, &pix_clk_100hz);
1609 :
1610 0 : if (tg->funcs->get_optc_source)
1611 0 : tg->funcs->get_optc_source(tg,
1612 : &numOdmPipes, &id_src[0], &id_src[1]);
1613 :
1614 0 : if (numOdmPipes == 2)
1615 0 : pix_clk_100hz *= 2;
1616 0 : if (numOdmPipes == 4)
1617 0 : pix_clk_100hz *= 4;
1618 :
1619 : // Note: In rare cases, HW pixclk may differ from crtc's pixclk
1620 : // slightly due to rounding issues in 10 kHz units.
1621 0 : if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1622 0 : return false;
1623 :
1624 0 : if (!se->funcs->dp_get_pixel_format)
1625 : return false;
1626 :
1627 0 : if (!se->funcs->dp_get_pixel_format(
1628 : se,
1629 : &hw_crtc_timing.pixel_encoding,
1630 : &hw_crtc_timing.display_color_depth))
1631 : return false;
1632 :
1633 0 : if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1634 : return false;
1635 :
1636 0 : if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1637 : return false;
1638 : }
1639 :
1640 0 : if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1641 : return false;
1642 : }
1643 :
1644 0 : if (is_edp_ilr_optimization_required(link, crtc_timing)) {
1645 0 : DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1646 0 : return false;
1647 : }
1648 :
1649 : return true;
1650 : }
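/*
 * Illustrative sketch (not part of the original file): a DM layer could
 * use dc_validate_boot_timing() to decide whether a display lit by the
 * VBIOS/firmware can be adopted without a full mode set. The call site
 * and the stream/sink wiring are assumptions; the flag set here is the
 * one counted by get_seamless_boot_stream_count() above.
 */
static void example_mark_seamless_boot(struct dc *dc,
				       struct dc_stream_state *stream)
{
	if (dc_validate_boot_timing(dc, stream->sink, &stream->timing))
		stream->apply_seamless_boot_optimization = true;
}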
1651 :
1652 : static inline bool should_update_pipe_for_stream(
1653 : struct dc_state *context,
1654 : struct pipe_ctx *pipe_ctx,
1655 : struct dc_stream_state *stream)
1656 : {
1657 0 : return (pipe_ctx->stream && pipe_ctx->stream == stream);
1658 : }
1659 :
1660 : static inline bool should_update_pipe_for_plane(
1661 : struct dc_state *context,
1662 : struct pipe_ctx *pipe_ctx,
1663 : struct dc_plane_state *plane_state)
1664 : {
1665 : return (pipe_ctx->plane_state == plane_state);
1666 : }
1667 :
1668 0 : void dc_enable_stereo(
1669 : struct dc *dc,
1670 : struct dc_state *context,
1671 : struct dc_stream_state *streams[],
1672 : uint8_t stream_count)
1673 : {
1674 : int i, j;
1675 : struct pipe_ctx *pipe;
1676 :
1677 0 : for (i = 0; i < MAX_PIPES; i++) {
1678 0 : if (context != NULL) {
1679 0 : pipe = &context->res_ctx.pipe_ctx[i];
1680 : } else {
1681 0 : context = dc->current_state;
1682 0 : pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1683 : }
1684 :
1685 0 : for (j = 0; pipe && j < stream_count; j++) {
1686 0 : if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1687 0 : dc->hwss.setup_stereo)
1688 0 : dc->hwss.setup_stereo(pipe, dc);
1689 : }
1690 : }
1691 0 : }
1692 :
1693 0 : void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1694 : {
1695 0 : if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1696 0 : enable_timing_multisync(dc, context);
1697 0 : program_timing_sync(dc, context);
1698 : }
1699 0 : }
1700 :
1701 : static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1702 : {
1703 : int i;
1704 : unsigned int stream_mask = 0;
1705 :
1706 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
1707 0 : if (context->res_ctx.pipe_ctx[i].stream)
1708 0 : stream_mask |= 1 << i;
1709 : }
1710 :
1711 0 : return stream_mask;
1712 : }
1713 :
1714 0 : void dc_z10_restore(const struct dc *dc)
1715 : {
1716 0 : if (dc->hwss.z10_restore)
1717 0 : dc->hwss.z10_restore(dc);
1718 0 : }
1719 :
1720 0 : void dc_z10_save_init(struct dc *dc)
1721 : {
1722 0 : if (dc->hwss.z10_save_init)
1723 0 : dc->hwss.z10_save_init(dc);
1724 0 : }
1725 :
1726 : /*
1727 : * Applies the given context to HW and copies it into the current context.
1728 : * It's up to the user to release the src context afterwards.
1729 : */
1730 0 : static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1731 : {
1732 0 : struct dc_bios *dcb = dc->ctx->dc_bios;
1733 0 : enum dc_status result = DC_ERROR_UNEXPECTED;
1734 : struct pipe_ctx *pipe;
1735 : int i, k, l;
1736 0 : struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1737 : struct dc_state *old_state;
1738 :
1739 0 : dc_z10_restore(dc);
1740 0 : dc_allow_idle_optimizations(dc, false);
1741 :
1742 0 : for (i = 0; i < context->stream_count; i++)
1743 0 : dc_streams[i] = context->streams[i];
1744 :
1745 0 : if (!dcb->funcs->is_accelerated_mode(dcb)) {
1746 0 : disable_vbios_mode_if_required(dc, context);
1747 0 : dc->hwss.enable_accelerated_mode(dc, context);
1748 : }
1749 :
1750 0 : if (context->stream_count > get_seamless_boot_stream_count(context) ||
1751 : context->stream_count == 0)
1752 0 : dc->hwss.prepare_bandwidth(dc, context);
1753 :
1754 0 : disable_dangling_plane(dc, context);
1755 : /* re-program planes for the existing stream, in case we need to
1756 : * free up plane resources for later use
1757 : */
1758 0 : if (dc->hwss.apply_ctx_for_surface) {
1759 0 : for (i = 0; i < context->stream_count; i++) {
1760 0 : if (context->streams[i]->mode_changed)
1761 0 : continue;
1762 0 : apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1763 0 : dc->hwss.apply_ctx_for_surface(
1764 0 : dc, context->streams[i],
1765 : context->stream_status[i].plane_count,
1766 : context); /* use new pipe config in new context */
1767 0 : apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1768 0 : dc->hwss.post_unlock_program_front_end(dc, context);
1769 : }
1770 : }
1771 :
1772 : /* Program hardware */
1773 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
1774 0 : pipe = &context->res_ctx.pipe_ctx[i];
1775 0 : dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1776 : }
1777 :
1778 0 : result = dc->hwss.apply_ctx_to_hw(dc, context);
1779 :
1780 0 : if (result != DC_OK) {
1781 : /* Application of dc_state to hardware stopped. */
1782 0 : dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
1783 0 : return result;
1784 : }
1785 :
1786 0 : dc_trigger_sync(dc, context);
1787 :
1788 : /* Program all planes within the new context */
1789 0 : if (dc->hwss.program_front_end_for_ctx) {
1790 0 : dc->hwss.interdependent_update_lock(dc, context, true);
1791 0 : dc->hwss.program_front_end_for_ctx(dc, context);
1792 0 : dc->hwss.interdependent_update_lock(dc, context, false);
1793 0 : dc->hwss.post_unlock_program_front_end(dc, context);
1794 : }
1795 0 : for (i = 0; i < context->stream_count; i++) {
1796 0 : const struct dc_link *link = context->streams[i]->link;
1797 :
1798 0 : if (!context->streams[i]->mode_changed)
1799 0 : continue;
1800 :
1801 0 : if (dc->hwss.apply_ctx_for_surface) {
1802 0 : apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1803 0 : dc->hwss.apply_ctx_for_surface(
1804 0 : dc, context->streams[i],
1805 : context->stream_status[i].plane_count,
1806 : context);
1807 0 : apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1808 0 : dc->hwss.post_unlock_program_front_end(dc, context);
1809 : }
1810 :
1811 : /*
1812 : * enable stereo
1813 : * TODO rework dc_enable_stereo call to work with validation sets?
1814 : */
1815 0 : for (k = 0; k < MAX_PIPES; k++) {
1816 0 : pipe = &context->res_ctx.pipe_ctx[k];
1817 :
1818 0 : for (l = 0 ; pipe && l < context->stream_count; l++) {
1819 0 : if (context->streams[l] &&
1820 0 : context->streams[l] == pipe->stream &&
1821 0 : dc->hwss.setup_stereo)
1822 0 : dc->hwss.setup_stereo(pipe, dc);
1823 : }
1824 : }
1825 :
1826 0 : CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dkHz}",
1827 : context->streams[i]->timing.h_addressable,
1828 : context->streams[i]->timing.v_addressable,
1829 : context->streams[i]->timing.h_total,
1830 : context->streams[i]->timing.v_total,
1831 : context->streams[i]->timing.pix_clk_100hz / 10);
1832 : }
1833 :
1834 0 : dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1835 :
1836 0 : if (context->stream_count > get_seamless_boot_stream_count(context) ||
1837 : context->stream_count == 0) {
1838 : /* Must wait until no flips are pending before optimizing bandwidth */
1839 0 : wait_for_no_pipes_pending(dc, context);
1840 : /* pplib is notified if disp_num changed */
1841 0 : dc->hwss.optimize_bandwidth(dc, context);
1842 : }
1843 :
1844 : if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1845 : TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1846 : else
1847 0 : TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1848 :
1849 0 : context->stream_mask = get_stream_mask(dc, context);
1850 :
1851 0 : if (context->stream_mask != dc->current_state->stream_mask)
1852 0 : dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1853 :
1854 0 : for (i = 0; i < context->stream_count; i++)
1855 0 : context->streams[i]->mode_changed = false;
1856 :
1857 0 : old_state = dc->current_state;
1858 0 : dc->current_state = context;
1859 :
1860 0 : dc_release_state(old_state);
1861 :
1862 0 : dc_retain_state(dc->current_state);
1863 :
1864 0 : return result;
1865 : }
1866 :
1867 0 : bool dc_commit_state(struct dc *dc, struct dc_state *context)
1868 : {
1869 0 : enum dc_status result = DC_ERROR_UNEXPECTED;
1870 : int i;
1871 :
1872 0 : if (!context_changed(dc, context))
1873 : return true;
1874 :
1875 0 : DC_LOG_DC("%s: %d streams\n",
1876 : __func__, context->stream_count);
1877 :
1878 0 : for (i = 0; i < context->stream_count; i++) {
1879 0 : struct dc_stream_state *stream = context->streams[i];
1880 :
1881 0 : dc_stream_log(dc, stream);
1882 : }
1883 :
1884 : /*
1885 : * Previous validation was performed with fast_validation = true and
1886 : * the full DML state required for hardware programming was skipped.
1887 : *
1888 : * Re-validate here to calculate these parameters / watermarks.
1889 : */
1890 0 : result = dc_validate_global_state(dc, context, false);
1891 0 : if (result != DC_OK) {
1892 0 : DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
1893 : dc_status_to_str(result), result);
1894 0 : return false;
1895 : }
1896 :
1897 0 : result = dc_commit_state_no_check(dc, context);
1898 :
1899 0 : return (result == DC_OK);
1900 : }
1901 :
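: /* Acquire (acquire == true) or release a post-blend MPC 3D LUT and shaper for
:  * the pipe driving @stream. On acquire, the MPCC instance is looked up from
:  * the pipe's HUBP; on release no pipe lookup is required.
:  */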
1902 0 : bool dc_acquire_release_mpc_3dlut(
1903 : struct dc *dc, bool acquire,
1904 : struct dc_stream_state *stream,
1905 : struct dc_3dlut **lut,
1906 : struct dc_transfer_func **shaper)
1907 : {
1908 : int pipe_idx;
1909 0 : bool ret = false;
1910 0 : bool found_pipe_idx = false;
1911 0 : const struct resource_pool *pool = dc->res_pool;
1912 0 : struct resource_context *res_ctx = &dc->current_state->res_ctx;
1913 0 : int mpcc_id = 0;
1914 :
1915 0 : if (pool && res_ctx) {
1916 0 : if (acquire) {
1917 : /* find pipe idx for the given stream */
1918 0 : for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
1919 0 : if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
1920 0 : found_pipe_idx = true;
1921 0 : mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
1922 0 : break;
1923 : }
1924 : }
1925 : } else
1926 : found_pipe_idx = true; /* for release, pipe_idx is not required */
1927 :
1928 0 : if (found_pipe_idx) {
1929 0 : if (acquire && pool->funcs->acquire_post_bldn_3dlut)
1930 0 : ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
1931 0 : else if (!acquire && pool->funcs->release_post_bldn_3dlut)
1932 0 : ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
1933 : }
1934 : }
1935 0 : return ret;
1936 : }
1937 :
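: /* Return true if any non-phantom pipe in @context still has a flip pending. */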
1938 0 : static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1939 : {
1940 : int i;
1941 : struct pipe_ctx *pipe;
1942 :
1943 0 : for (i = 0; i < MAX_PIPES; i++) {
1944 0 : pipe = &context->res_ctx.pipe_ctx[i];
1945 :
1946 : // Don't check flip pending on phantom pipes
1947 0 : if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
1948 0 : continue;
1949 :
1950 : /* Must set to false to start with, due to OR in update function */
1951 0 : pipe->plane_state->status.is_flip_pending = false;
1952 0 : dc->hwss.update_pending_status(pipe);
1953 0 : if (pipe->plane_state->status.is_flip_pending)
1954 : return true;
1955 : }
1956 : return false;
1957 : }
1958 :
1959 : /* Perform updates here which need to be deferred until the next vupdate
1960 : *
1961 : * e.g. the blend LUT, 3D LUT, and shaper LUT bypass regs are double buffered,
1962 : * but forcing LUT memory to the shutdown state is immediate. This causes
1963 : * single-frame corruption as the LUT gets disabled mid-frame unless shutdown
1964 : * is deferred until after entering bypass.
1965 : */
1966 0 : static void process_deferred_updates(struct dc *dc)
1967 : {
1968 0 : int i = 0;
1969 :
1970 0 : if (dc->debug.enable_mem_low_power.bits.cm) {
1971 0 : ASSERT(dc->dcn_ip->max_num_dpp);
1972 0 : for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
1973 0 : if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
1974 0 : dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
1975 : }
1976 0 : }
1977 :
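: /* Lower clocks and watermarks once the last pending flip has completed:
:  * disable pipes that lost their stream or plane, flush deferred LUT updates
:  * and let the clock manager optimize bandwidth. Bails out early if no
:  * optimization is required, during seamless boot, or while flips are pending.
:  */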
1978 0 : void dc_post_update_surfaces_to_stream(struct dc *dc)
1979 : {
1980 : int i;
1981 0 : struct dc_state *context = dc->current_state;
1982 :
1983 0 : if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
1984 : return;
1985 :
1986 0 : post_surface_trace(dc);
1987 :
1988 : if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1989 : TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1990 : else
1991 0 : TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1992 :
1993 0 : if (is_flip_pending_in_pipes(dc, context))
1994 : return;
1995 :
1996 0 : for (i = 0; i < dc->res_pool->pipe_count; i++)
1997 0 : if (context->res_ctx.pipe_ctx[i].stream == NULL ||
1998 0 : context->res_ctx.pipe_ctx[i].plane_state == NULL) {
1999 0 : context->res_ctx.pipe_ctx[i].pipe_idx = i;
2000 0 : dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
2001 : }
2002 :
2003 0 : process_deferred_updates(dc);
2004 :
2005 0 : dc->hwss.optimize_bandwidth(dc, context);
2006 :
2007 0 : dc->optimized_required = false;
2008 0 : dc->wm_optimized_required = false;
2009 : }
2010 :
2011 : static void init_state(struct dc *dc, struct dc_state *context)
2012 : {
2013 : /* Each context must have its own instance of VBA, and in order to
2014 : * initialize and obtain the IP and SOC parameters, the base DML instance
2015 : * from DC is initially copied into every context
2016 : */
2017 0 : memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
2018 : }
2019 :
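: /* Allocate a zero-initialized, refcounted dc_state. The caller owns one
:  * reference and drops it with dc_release_state(); dc_retain_state() and
:  * dc_release_state() below wrap kref_get()/kref_put().
:  */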
2020 0 : struct dc_state *dc_create_state(struct dc *dc)
2021 : {
2022 0 : struct dc_state *context = kvzalloc(sizeof(struct dc_state),
2023 : GFP_KERNEL);
2024 :
2025 0 : if (!context)
2026 : return NULL;
2027 :
2028 0 : init_state(dc, context);
2029 :
2030 0 : kref_init(&context->refcount);
2031 :
2032 0 : return context;
2033 : }
2034 :
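: /* Copy a dc_state: the struct is duplicated wholesale, the intra-state pipe
:  * pointers (top/bottom/ODM) are re-targeted at the new copy, and stream and
:  * plane refcounts are bumped. The copy starts with a fresh refcount of one.
:  */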
2035 0 : struct dc_state *dc_copy_state(struct dc_state *src_ctx)
2036 : {
2037 : int i, j;
2038 0 : struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
2039 :
2040 0 : if (!new_ctx)
2041 : return NULL;
2042 0 : memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
2043 :
2044 0 : for (i = 0; i < MAX_PIPES; i++) {
2045 0 : struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
2046 :
2047 0 : if (cur_pipe->top_pipe)
2048 0 : cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
2049 :
2050 0 : if (cur_pipe->bottom_pipe)
2051 0 : cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
2052 :
2053 0 : if (cur_pipe->prev_odm_pipe)
2054 0 : cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
2055 :
2056 0 : if (cur_pipe->next_odm_pipe)
2057 0 : cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
2058 :
2059 : }
2060 :
2061 0 : for (i = 0; i < new_ctx->stream_count; i++) {
2062 0 : dc_stream_retain(new_ctx->streams[i]);
2063 0 : for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2064 0 : dc_plane_state_retain(
2065 : new_ctx->stream_status[i].plane_states[j]);
2066 : }
2067 :
2068 0 : kref_init(&new_ctx->refcount);
2069 :
2070 0 : return new_ctx;
2071 : }
2072 :
2073 0 : void dc_retain_state(struct dc_state *context)
2074 : {
2075 0 : kref_get(&context->refcount);
2076 0 : }
2077 :
2078 0 : static void dc_state_free(struct kref *kref)
2079 : {
2080 0 : struct dc_state *context = container_of(kref, struct dc_state, refcount);
2081 0 : dc_resource_state_destruct(context);
2082 0 : kvfree(context);
2083 0 : }
2084 :
2085 0 : void dc_release_state(struct dc_state *context)
2086 : {
2087 0 : kref_put(&context->refcount, dc_state_free);
2088 0 : }
2089 :
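: /* Drive the generic GPIO mux used for the stereo sync signal: open the pin as
:  * an output, select the pass-through stereo sync source and enable or disable
:  * the mux output according to @enable.
:  */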
2090 0 : bool dc_set_generic_gpio_for_stereo(bool enable,
2091 : struct gpio_service *gpio_service)
2092 : {
2093 0 : enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2094 : struct gpio_pin_info pin_info;
2095 : struct gpio *generic;
2096 0 : struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2097 : GFP_KERNEL);
2098 :
2099 0 : if (!config)
2100 : return false;
2101 0 : pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2102 :
2103 0 : if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2104 0 : kfree(config);
2105 0 : return false;
2106 : } else {
2107 0 : generic = dal_gpio_service_create_generic_mux(
2108 : gpio_service,
2109 : pin_info.offset,
2110 : pin_info.mask);
2111 : }
2112 :
2113 0 : if (!generic) {
2114 0 : kfree(config);
2115 0 : return false;
2116 : }
2117 :
2118 0 : gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2119 :
2120 0 : config->enable_output_from_mux = enable;
2121 0 : config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2122 :
2123 0 : if (gpio_result == GPIO_RESULT_OK)
2124 0 : gpio_result = dal_mux_setup_config(generic, config);
2125 :
2126 0 : if (gpio_result == GPIO_RESULT_OK) {
2127 0 : dal_gpio_close(generic);
2128 0 : dal_gpio_destroy_generic_mux(&generic);
2129 0 : kfree(config);
2130 0 : return true;
2131 : } else {
2132 0 : dal_gpio_close(generic);
2133 0 : dal_gpio_destroy_generic_mux(&generic);
2134 0 : kfree(config);
2135 0 : return false;
2136 : }
2137 : }
2138 :
2139 : static bool is_surface_in_context(
2140 : const struct dc_state *context,
2141 : const struct dc_plane_state *plane_state)
2142 : {
2143 : int j;
2144 :
2145 0 : for (j = 0; j < MAX_PIPES; j++) {
2146 0 : const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2147 :
2148 0 : if (plane_state == pipe_ctx->plane_state) {
2149 : return true;
2150 : }
2151 : }
2152 :
2153 : return false;
2154 : }
2155 :
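: /* Classify a plane_info update by comparing each field against the current
:  * surface state, raising the matching update flags along the way. Format,
:  * rotation, stereo, DCC, bpp and swizzle changes require a FULL update; most
:  * other changes are MED.
:  */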
2156 0 : static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2157 : {
2158 0 : union surface_update_flags *update_flags = &u->surface->update_flags;
2159 0 : enum surface_update_type update_type = UPDATE_TYPE_FAST;
2160 :
2161 0 : if (!u->plane_info)
2162 : return UPDATE_TYPE_FAST;
2163 :
2164 0 : if (u->plane_info->color_space != u->surface->color_space) {
2165 0 : update_flags->bits.color_space_change = 1;
2166 : elevate_update_type(&update_type, UPDATE_TYPE_MED);
2167 : }
2168 :
2169 0 : if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2170 0 : update_flags->bits.horizontal_mirror_change = 1;
2171 : elevate_update_type(&update_type, UPDATE_TYPE_MED);
2172 : }
2173 :
2174 0 : if (u->plane_info->rotation != u->surface->rotation) {
2175 0 : update_flags->bits.rotation_change = 1;
2176 : elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2177 : }
2178 :
2179 0 : if (u->plane_info->format != u->surface->format) {
2180 0 : update_flags->bits.pixel_format_change = 1;
2181 : elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2182 : }
2183 :
2184 0 : if (u->plane_info->stereo_format != u->surface->stereo_format) {
2185 0 : update_flags->bits.stereo_format_change = 1;
2186 : elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2187 : }
2188 :
2189 0 : if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2190 0 : update_flags->bits.per_pixel_alpha_change = 1;
2191 : elevate_update_type(&update_type, UPDATE_TYPE_MED);
2192 : }
2193 :
2194 0 : if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2195 0 : update_flags->bits.global_alpha_change = 1;
2196 : elevate_update_type(&update_type, UPDATE_TYPE_MED);
2197 : }
2198 :
2199 0 : if (u->plane_info->dcc.enable != u->surface->dcc.enable
2200 0 : || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2201 0 : || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2202 : /* During DCC on/off, stutter period is calculated before
2203 : * DCC has fully transitioned. This results in incorrect
2204 : * stutter period calculation. Triggering a full update will
2205 : * recalculate stutter period.
2206 : */
2207 0 : update_flags->bits.dcc_change = 1;
2208 : elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2209 : }
2210 :
2211 0 : if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2212 0 : resource_pixel_format_to_bpp(u->surface->format)) {
2213 : /* a different number of bytes per element requires full bandwidth
2214 : * and DML recalculation
2215 : */
2216 0 : update_flags->bits.bpp_change = 1;
2217 : elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2218 : }
2219 :
2220 0 : if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2221 0 : || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2222 0 : update_flags->bits.plane_size_change = 1;
2223 : elevate_update_type(&update_type, UPDATE_TYPE_MED);
2224 : }
2225 :
2226 :
2227 0 : if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2228 : sizeof(union dc_tiling_info)) != 0) {
2229 0 : update_flags->bits.swizzle_change = 1;
2230 0 : elevate_update_type(&update_type, UPDATE_TYPE_MED);
2231 :
2232 : /* todo: the checks below are HW dependent; we should add a hook to
2233 : * DCE/N resource and validate there.
2234 : */
2235 0 : if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2236 : /* swizzled mode requires RQ to be set up properly,
2237 : * thus DML needs to run to calculate RQ settings
2238 : */
2239 0 : update_flags->bits.bandwidth_change = 1;
2240 : elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2241 : }
2242 : }
2243 :
2244 : /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2245 : return update_type;
2246 : }
2247 :
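: /* Classify a scaling_info update: pure position moves are MED, while anything
:  * that changes clocks, bandwidth or scaling ratios requires a FULL update.
:  */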
2248 0 : static enum surface_update_type get_scaling_info_update_type(
2249 : const struct dc_surface_update *u)
2250 : {
2251 0 : union surface_update_flags *update_flags = &u->surface->update_flags;
2252 :
2253 0 : if (!u->scaling_info)
2254 : return UPDATE_TYPE_FAST;
2255 :
2256 0 : if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2257 0 : || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2258 0 : || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2259 0 : || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2260 0 : || u->scaling_info->scaling_quality.integer_scaling !=
2261 0 : u->surface->scaling_quality.integer_scaling
2262 : ) {
2263 0 : update_flags->bits.scaling_change = 1;
2264 :
2265 0 : if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2266 0 : || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2267 0 : && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2268 0 : || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2269 : /* Making dst rect smaller requires a bandwidth change */
2270 0 : update_flags->bits.bandwidth_change = 1;
2271 : }
2272 :
2273 0 : if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2274 0 : || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2275 :
2276 0 : update_flags->bits.scaling_change = 1;
2277 0 : if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2278 0 : || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2279 : /* Making src rect bigger requires a bandwidth change */
2280 0 : update_flags->bits.clock_change = 1;
2281 : }
2282 :
2283 0 : if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2284 0 : || u->scaling_info->src_rect.y != u->surface->src_rect.y
2285 0 : || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2286 0 : || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2287 0 : || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2288 0 : || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2289 0 : update_flags->bits.position_change = 1;
2290 :
2291 0 : if (update_flags->bits.clock_change
2292 : || update_flags->bits.bandwidth_change
2293 0 : || update_flags->bits.scaling_change)
2294 : return UPDATE_TYPE_FULL;
2295 :
2296 0 : if (update_flags->bits.position_change)
2297 : return UPDATE_TYPE_MED;
2298 :
2299 : return UPDATE_TYPE_FAST;
2300 : }
2301 :
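: /* Combine the plane_info and scaling checks above, plus the LUT/transfer
:  * function flags, into one overall update type for a single surface. A
:  * surface not yet in the current context (or with force_full_update set) is
:  * unconditionally FULL.
:  */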
2302 0 : static enum surface_update_type det_surface_update(const struct dc *dc,
2303 : const struct dc_surface_update *u)
2304 : {
2305 0 : const struct dc_state *context = dc->current_state;
2306 : enum surface_update_type type;
2307 0 : enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2308 0 : union surface_update_flags *update_flags = &u->surface->update_flags;
2309 :
2310 0 : if (u->flip_addr)
2311 0 : update_flags->bits.addr_update = 1;
2312 :
2313 0 : if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2314 0 : update_flags->raw = 0xFFFFFFFF;
2315 : return UPDATE_TYPE_FULL;
2316 : }
2317 :
2318 0 : update_flags->raw = 0; // Reset all flags
2319 :
2320 0 : type = get_plane_info_update_type(u);
2321 0 : elevate_update_type(&overall_type, type);
2322 :
2323 0 : type = get_scaling_info_update_type(u);
2324 0 : elevate_update_type(&overall_type, type);
2325 :
2326 0 : if (u->flip_addr)
2327 0 : update_flags->bits.addr_update = 1;
2328 :
2329 0 : if (u->in_transfer_func)
2330 0 : update_flags->bits.in_transfer_func_change = 1;
2331 :
2332 0 : if (u->input_csc_color_matrix)
2333 0 : update_flags->bits.input_csc_change = 1;
2334 :
2335 0 : if (u->coeff_reduction_factor)
2336 0 : update_flags->bits.coeff_reduction_change = 1;
2337 :
2338 0 : if (u->gamut_remap_matrix)
2339 0 : update_flags->bits.gamut_remap_change = 1;
2340 :
2341 0 : if (u->gamma) {
2342 0 : enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2343 :
2344 0 : if (u->plane_info)
2345 0 : format = u->plane_info->format;
2346 0 : else if (u->surface)
2347 0 : format = u->surface->format;
2348 :
2349 0 : if (dce_use_lut(format))
2350 0 : update_flags->bits.gamma_change = 1;
2351 : }
2352 :
2353 0 : if (u->lut3d_func || u->func_shaper)
2354 0 : update_flags->bits.lut_3d = 1;
2355 :
2356 0 : if (u->hdr_mult.value)
2357 0 : if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2358 0 : update_flags->bits.hdr_mult = 1;
2359 : elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2360 : }
2361 :
2362 0 : if (update_flags->bits.in_transfer_func_change) {
2363 0 : type = UPDATE_TYPE_MED;
2364 : elevate_update_type(&overall_type, type);
2365 : }
2366 :
2367 0 : if (update_flags->bits.input_csc_change
2368 : || update_flags->bits.coeff_reduction_change
2369 : || update_flags->bits.lut_3d
2370 : || update_flags->bits.gamma_change
2371 0 : || update_flags->bits.gamut_remap_change) {
2372 0 : type = UPDATE_TYPE_FULL;
2373 : elevate_update_type(&overall_type, type);
2374 : }
2375 :
2376 : return overall_type;
2377 : }
2378 :
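: /* Determine the overall update type for a set of surface updates plus an
:  * optional stream update. Any stream-update flag raised before the su_flags
:  * check forces FULL; out_csc is flagged afterwards, so by itself it does not
:  * escalate the update type.
:  */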
2379 0 : static enum surface_update_type check_update_surfaces_for_stream(
2380 : struct dc *dc,
2381 : struct dc_surface_update *updates,
2382 : int surface_count,
2383 : struct dc_stream_update *stream_update,
2384 : const struct dc_stream_status *stream_status)
2385 : {
2386 : int i;
2387 0 : enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2388 :
2389 0 : if (dc->idle_optimizations_allowed)
2390 0 : overall_type = UPDATE_TYPE_FULL;
2391 :
2392 0 : if (stream_status == NULL || stream_status->plane_count != surface_count)
2393 0 : overall_type = UPDATE_TYPE_FULL;
2394 :
2395 0 : if (stream_update && stream_update->pending_test_pattern) {
2396 0 : overall_type = UPDATE_TYPE_FULL;
2397 : }
2398 :
2399 : /* some stream updates require passive update */
2400 0 : if (stream_update) {
2401 0 : union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2402 :
2403 0 : if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2404 0 : (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2405 0 : stream_update->integer_scaling_update)
2406 0 : su_flags->bits.scaling = 1;
2407 :
2408 0 : if (stream_update->out_transfer_func)
2409 0 : su_flags->bits.out_tf = 1;
2410 :
2411 0 : if (stream_update->abm_level)
2412 0 : su_flags->bits.abm_level = 1;
2413 :
2414 0 : if (stream_update->dpms_off)
2415 0 : su_flags->bits.dpms_off = 1;
2416 :
2417 0 : if (stream_update->gamut_remap)
2418 0 : su_flags->bits.gamut_remap = 1;
2419 :
2420 0 : if (stream_update->wb_update)
2421 0 : su_flags->bits.wb_update = 1;
2422 :
2423 0 : if (stream_update->dsc_config)
2424 0 : su_flags->bits.dsc_changed = 1;
2425 :
2426 0 : if (stream_update->mst_bw_update)
2427 0 : su_flags->bits.mst_bw = 1;
2428 0 : if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
2429 0 : su_flags->bits.crtc_timing_adjust = 1;
2430 :
2431 0 : if (su_flags->raw != 0)
2432 0 : overall_type = UPDATE_TYPE_FULL;
2433 :
2434 0 : if (stream_update->output_csc_transform || stream_update->output_color_space)
2435 0 : su_flags->bits.out_csc = 1;
2436 : }
2437 :
2438 0 : for (i = 0 ; i < surface_count; i++) {
2439 0 : enum surface_update_type type =
2440 0 : det_surface_update(dc, &updates[i]);
2441 :
2442 0 : elevate_update_type(&overall_type, type);
2443 : }
2444 :
2445 0 : return overall_type;
2446 : }
2447 :
2448 0 : static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect)
2449 : {
2450 : int view_height, view_width, clip_x, clip_y, clip_width, clip_height;
2451 :
2452 0 : view_height = src.height;
2453 0 : view_width = src.width;
2454 :
2455 0 : clip_x = clip_rect.x;
2456 0 : clip_y = clip_rect.y;
2457 :
2458 0 : clip_width = clip_rect.width;
2459 0 : clip_height = clip_rect.height;
2460 :
2461 : /* check for centered video, accounting for off-by-1 scaling truncation */
2462 0 : if ((view_height - clip_y - clip_height <= clip_y + 1) &&
2463 0 : (view_width - clip_x - clip_width <= clip_x + 1) &&
2464 0 : (view_height - clip_y - clip_height >= clip_y - 1) &&
2465 0 : (view_width - clip_x - clip_width >= clip_x - 1)) {
2466 :
2467 : /* when the OS scales up/down to letterbox, it may end up
2468 : * with a few blank pixels on the border due to truncation.
2469 : * Add an offset margin to account for this
2470 : */
2471 0 : if (clip_x <= 4 || clip_y <= 4)
2472 : return true;
2473 : }
2474 :
2475 : return false;
2476 : }
2477 :
2478 0 : static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc,
2479 : struct dc_surface_update *srf_updates, int surface_count,
2480 : enum surface_update_type update_type)
2481 : {
2482 0 : enum surface_update_type new_update_type = update_type;
2483 : int i, j;
2484 0 : struct pipe_ctx *pipe = NULL;
2485 : struct dc_stream_state *stream;
2486 :
2487 : /* Check that we are in windowed MPO with ODM
2488 : * - look for MPO pipe by scanning pipes for first pipe matching
2489 : * surface that has moved (position change)
2490 : * - MPO pipe will have top pipe
2491 : * - check that top pipe has ODM pointer
2492 : */
2493 0 : if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) {
2494 0 : for (i = 0; i < surface_count; i++) {
2495 0 : if (srf_updates[i].surface && srf_updates[i].scaling_info
2496 0 : && srf_updates[i].surface->update_flags.bits.position_change) {
2497 :
2498 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
2499 0 : if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) {
2500 0 : pipe = &dc->current_state->res_ctx.pipe_ctx[j];
2501 0 : stream = pipe->stream;
2502 0 : break;
2503 : }
2504 : }
2505 :
2506 0 : if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream
2507 0 : && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) {
2508 : struct rect old_clip_rect, new_clip_rect;
2509 : bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle;
2510 : bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle;
2511 :
2512 0 : old_clip_rect = srf_updates[i].surface->clip_rect;
2513 0 : new_clip_rect = srf_updates[i].scaling_info->clip_rect;
2514 :
2515 0 : old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2516 0 : old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2517 0 : old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right;
2518 :
2519 0 : new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2520 0 : new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2521 0 : new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right;
2522 :
2523 0 : if (old_clip_rect_left && new_clip_rect_middle)
2524 : new_update_type = UPDATE_TYPE_FULL;
2525 0 : else if (old_clip_rect_middle && new_clip_rect_right)
2526 : new_update_type = UPDATE_TYPE_FULL;
2527 0 : else if (old_clip_rect_right && new_clip_rect_middle)
2528 : new_update_type = UPDATE_TYPE_FULL;
2529 0 : else if (old_clip_rect_middle && new_clip_rect_left)
2530 0 : new_update_type = UPDATE_TYPE_FULL;
2531 : }
2532 : }
2533 : }
2534 : }
2535 0 : return new_update_type;
2536 : }
2537 :
2538 : /*
2539 : * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2540 : *
2541 : * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2542 : */
2543 0 : enum surface_update_type dc_check_update_surfaces_for_stream(
2544 : struct dc *dc,
2545 : struct dc_surface_update *updates,
2546 : int surface_count,
2547 : struct dc_stream_update *stream_update,
2548 : const struct dc_stream_status *stream_status)
2549 : {
2550 : int i;
2551 : enum surface_update_type type;
2552 :
2553 0 : if (stream_update)
2554 0 : stream_update->stream->update_flags.raw = 0;
2555 0 : for (i = 0; i < surface_count; i++)
2556 0 : updates[i].surface->update_flags.raw = 0;
2557 :
2558 0 : type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2559 0 : if (type == UPDATE_TYPE_FULL) {
2560 0 : if (stream_update) {
2561 0 : uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2562 0 : stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2563 0 : stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2564 : }
2565 0 : for (i = 0; i < surface_count; i++)
2566 0 : updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2567 : }
2568 :
2569 0 : if (type == UPDATE_TYPE_MED)
2570 0 : type = check_boundary_crossing_for_windowed_mpo_with_odm(dc,
2571 : updates, surface_count, type);
2572 :
2573 0 : if (type == UPDATE_TYPE_FAST) {
2574 : // If there's an available clock comparator, we use that.
2575 0 : if (dc->clk_mgr->funcs->are_clock_states_equal) {
2576 0 : if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2577 0 : dc->optimized_required = true;
2578 : // Else we fall back to a mem compare.
2579 0 : } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2580 0 : dc->optimized_required = true;
2581 : }
2582 :
2583 0 : dc->optimized_required |= dc->wm_optimized_required;
2584 : }
2585 :
2586 0 : return type;
2587 : }
2588 :
2589 : static struct dc_stream_status *stream_get_status(
2590 : struct dc_state *ctx,
2591 : struct dc_stream_state *stream)
2592 : {
2593 : uint8_t i;
2594 :
2595 0 : for (i = 0; i < ctx->stream_count; i++) {
2596 0 : if (stream == ctx->streams[i]) {
2597 0 : return &ctx->stream_status[i];
2598 : }
2599 : }
2600 :
2601 : return NULL;
2602 : }
2603 :
2604 : static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2605 :
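: /* Fold a dc_surface_update into the plane state itself. Pointed-to members
:  * (gamma, transfer functions, LUTs) are deep-copied only when the update
:  * supplies an object different from the one the plane already references.
:  */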
2606 0 : static void copy_surface_update_to_plane(
2607 : struct dc_plane_state *surface,
2608 : struct dc_surface_update *srf_update)
2609 : {
2610 0 : if (srf_update->flip_addr) {
2611 0 : surface->address = srf_update->flip_addr->address;
2612 0 : surface->flip_immediate =
2613 0 : srf_update->flip_addr->flip_immediate;
2614 0 : surface->time.time_elapsed_in_us[surface->time.index] =
2615 0 : srf_update->flip_addr->flip_timestamp_in_us -
2616 0 : surface->time.prev_update_time_in_us;
2617 0 : surface->time.prev_update_time_in_us =
2618 0 : srf_update->flip_addr->flip_timestamp_in_us;
2619 0 : surface->time.index++;
2620 0 : if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2621 0 : surface->time.index = 0;
2622 :
2623 0 : surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2624 : }
2625 :
2626 0 : if (srf_update->scaling_info) {
2627 0 : surface->scaling_quality =
2628 : srf_update->scaling_info->scaling_quality;
2629 0 : surface->dst_rect =
2630 0 : srf_update->scaling_info->dst_rect;
2631 0 : surface->src_rect =
2632 0 : srf_update->scaling_info->src_rect;
2633 0 : surface->clip_rect =
2634 0 : srf_update->scaling_info->clip_rect;
2635 : }
2636 :
2637 0 : if (srf_update->plane_info) {
2638 0 : surface->color_space =
2639 0 : srf_update->plane_info->color_space;
2640 0 : surface->format =
2641 0 : srf_update->plane_info->format;
2642 0 : surface->plane_size =
2643 0 : srf_update->plane_info->plane_size;
2644 0 : surface->rotation =
2645 0 : srf_update->plane_info->rotation;
2646 0 : surface->horizontal_mirror =
2647 0 : srf_update->plane_info->horizontal_mirror;
2648 0 : surface->stereo_format =
2649 0 : srf_update->plane_info->stereo_format;
2650 0 : surface->tiling_info =
2651 0 : srf_update->plane_info->tiling_info;
2652 0 : surface->visible =
2653 0 : srf_update->plane_info->visible;
2654 0 : surface->per_pixel_alpha =
2655 0 : srf_update->plane_info->per_pixel_alpha;
2656 0 : surface->global_alpha =
2657 0 : srf_update->plane_info->global_alpha;
2658 0 : surface->global_alpha_value =
2659 0 : srf_update->plane_info->global_alpha_value;
2660 0 : surface->dcc =
2661 0 : srf_update->plane_info->dcc;
2662 0 : surface->layer_index =
2663 0 : srf_update->plane_info->layer_index;
2664 : }
2665 :
2666 0 : if (srf_update->gamma &&
2667 0 : (surface->gamma_correction !=
2668 : srf_update->gamma)) {
2669 0 : memcpy(&surface->gamma_correction->entries,
2670 0 : &srf_update->gamma->entries,
2671 : sizeof(struct dc_gamma_entries));
2672 0 : surface->gamma_correction->is_identity =
2673 0 : srf_update->gamma->is_identity;
2674 0 : surface->gamma_correction->num_entries =
2675 0 : srf_update->gamma->num_entries;
2676 0 : surface->gamma_correction->type =
2677 0 : srf_update->gamma->type;
2678 : }
2679 :
2680 0 : if (srf_update->in_transfer_func &&
2681 0 : (surface->in_transfer_func !=
2682 : srf_update->in_transfer_func)) {
2683 0 : surface->in_transfer_func->sdr_ref_white_level =
2684 0 : srf_update->in_transfer_func->sdr_ref_white_level;
2685 0 : surface->in_transfer_func->tf =
2686 0 : srf_update->in_transfer_func->tf;
2687 0 : surface->in_transfer_func->type =
2688 0 : srf_update->in_transfer_func->type;
2689 0 : memcpy(&surface->in_transfer_func->tf_pts,
2690 0 : &srf_update->in_transfer_func->tf_pts,
2691 : sizeof(struct dc_transfer_func_distributed_points));
2692 : }
2693 :
2694 0 : if (srf_update->func_shaper &&
2695 0 : (surface->in_shaper_func !=
2696 : srf_update->func_shaper))
2697 0 : memcpy(surface->in_shaper_func, srf_update->func_shaper,
2698 : sizeof(*surface->in_shaper_func));
2699 :
2700 0 : if (srf_update->lut3d_func &&
2701 0 : (surface->lut3d_func !=
2702 : srf_update->lut3d_func))
2703 0 : memcpy(surface->lut3d_func, srf_update->lut3d_func,
2704 : sizeof(*surface->lut3d_func));
2705 :
2706 0 : if (srf_update->hdr_mult.value)
2707 0 : surface->hdr_mult =
2708 : srf_update->hdr_mult;
2709 :
2710 0 : if (srf_update->blend_tf &&
2711 0 : (surface->blend_tf !=
2712 : srf_update->blend_tf))
2713 0 : memcpy(surface->blend_tf, srf_update->blend_tf,
2714 : sizeof(*surface->blend_tf));
2715 :
2716 0 : if (srf_update->input_csc_color_matrix)
2717 0 : surface->input_csc_color_matrix =
2718 : *srf_update->input_csc_color_matrix;
2719 :
2720 0 : if (srf_update->coeff_reduction_factor)
2721 0 : surface->coeff_reduction_factor =
2722 : *srf_update->coeff_reduction_factor;
2723 :
2724 0 : if (srf_update->gamut_remap_matrix)
2725 0 : surface->gamut_remap_matrix =
2726 : *srf_update->gamut_remap_matrix;
2727 0 : }
2728 :
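: /* Fold a dc_stream_update into the stream. A DSC config change is first
:  * validated against a temporary copy of the current state; if bandwidth
:  * validation fails, the old DSC config is restored and update->dsc_config is
:  * cleared so it won't be programmed.
:  */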
2729 0 : static void copy_stream_update_to_stream(struct dc *dc,
2730 : struct dc_state *context,
2731 : struct dc_stream_state *stream,
2732 : struct dc_stream_update *update)
2733 : {
2734 0 : struct dc_context *dc_ctx = dc->ctx;
2735 :
2736 0 : if (update == NULL || stream == NULL)
2737 : return;
2738 :
2739 0 : if (update->src.height && update->src.width)
2740 0 : stream->src = update->src;
2741 :
2742 0 : if (update->dst.height && update->dst.width)
2743 0 : stream->dst = update->dst;
2744 :
2745 0 : if (update->out_transfer_func &&
2746 0 : stream->out_transfer_func != update->out_transfer_func) {
2747 0 : stream->out_transfer_func->sdr_ref_white_level =
2748 0 : update->out_transfer_func->sdr_ref_white_level;
2749 0 : stream->out_transfer_func->tf = update->out_transfer_func->tf;
2750 0 : stream->out_transfer_func->type =
2751 0 : update->out_transfer_func->type;
2752 0 : memcpy(&stream->out_transfer_func->tf_pts,
2753 0 : &update->out_transfer_func->tf_pts,
2754 : sizeof(struct dc_transfer_func_distributed_points));
2755 : }
2756 :
2757 0 : if (update->hdr_static_metadata)
2758 0 : stream->hdr_static_metadata = *update->hdr_static_metadata;
2759 :
2760 0 : if (update->abm_level)
2761 0 : stream->abm_level = *update->abm_level;
2762 :
2763 0 : if (update->periodic_interrupt0)
2764 0 : stream->periodic_interrupt0 = *update->periodic_interrupt0;
2765 :
2766 0 : if (update->periodic_interrupt1)
2767 0 : stream->periodic_interrupt1 = *update->periodic_interrupt1;
2768 :
2769 0 : if (update->gamut_remap)
2770 0 : stream->gamut_remap_matrix = *update->gamut_remap;
2771 :
2772 : /* Note: updating this after mode set is currently not a use case;
2773 : * however, if it arises, OCSC would need to be reprogrammed at a
2774 : * minimum
2775 : */
2776 0 : if (update->output_color_space)
2777 0 : stream->output_color_space = *update->output_color_space;
2778 :
2779 0 : if (update->output_csc_transform)
2780 0 : stream->csc_color_matrix = *update->output_csc_transform;
2781 :
2782 0 : if (update->vrr_infopacket)
2783 0 : stream->vrr_infopacket = *update->vrr_infopacket;
2784 :
2785 0 : if (update->allow_freesync)
2786 0 : stream->allow_freesync = *update->allow_freesync;
2787 :
2788 0 : if (update->vrr_active_variable)
2789 0 : stream->vrr_active_variable = *update->vrr_active_variable;
2790 :
2791 0 : if (update->crtc_timing_adjust)
2792 0 : stream->adjust = *update->crtc_timing_adjust;
2793 :
2794 0 : if (update->dpms_off)
2795 0 : stream->dpms_off = *update->dpms_off;
2796 :
2797 0 : if (update->hfvsif_infopacket)
2798 0 : stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2799 :
2800 0 : if (update->vtem_infopacket)
2801 0 : stream->vtem_infopacket = *update->vtem_infopacket;
2802 :
2803 0 : if (update->vsc_infopacket)
2804 0 : stream->vsc_infopacket = *update->vsc_infopacket;
2805 :
2806 0 : if (update->vsp_infopacket)
2807 0 : stream->vsp_infopacket = *update->vsp_infopacket;
2808 :
2809 0 : if (update->dither_option)
2810 0 : stream->dither_option = *update->dither_option;
2811 :
2812 0 : if (update->pending_test_pattern)
2813 0 : stream->test_pattern = *update->pending_test_pattern;
2814 : /* update current stream with writeback info */
2815 0 : if (update->wb_update) {
2816 : int i;
2817 :
2818 0 : stream->num_wb_info = update->wb_update->num_wb_info;
2819 0 : ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2820 0 : for (i = 0; i < stream->num_wb_info; i++)
2821 0 : stream->writeback_info[i] =
2822 0 : update->wb_update->writeback_info[i];
2823 : }
2824 0 : if (update->dsc_config) {
2825 0 : struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2826 0 : uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2827 0 : uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2828 0 : update->dsc_config->num_slices_v != 0);
2829 :
2830 : /* Use a temporary context for validating the new DSC config */
2831 0 : struct dc_state *dsc_validate_context = dc_create_state(dc);
2832 :
2833 0 : if (dsc_validate_context) {
2834 0 : dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2835 :
2836 0 : stream->timing.dsc_cfg = *update->dsc_config;
2837 0 : stream->timing.flags.DSC = enable_dsc;
2838 0 : if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2839 0 : stream->timing.dsc_cfg = old_dsc_cfg;
2840 0 : stream->timing.flags.DSC = old_dsc_enabled;
2841 0 : update->dsc_config = NULL;
2842 : }
2843 :
2844 : dc_release_state(dsc_validate_context);
2845 : } else {
2846 0 : DC_ERROR("Failed to allocate new validate context for DSC change\n");
2847 0 : update->dsc_config = NULL;
2848 : }
2849 : }
2850 : }
2851 :
2852 0 : void dc_reset_state(struct dc *dc, struct dc_state *context)
2853 : {
2854 0 : dc_resource_state_destruct(context);
2855 :
2856 : /* clear the structure, but don't reset the reference count */
2857 0 : memset(context, 0, offsetof(struct dc_state, refcount));
2858 :
2859 0 : init_state(dc, context);
2860 0 : }
2861 :
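: /* Build the new dc_state for a planes/stream update: classify the update
:  * type, copy the updates into the stream and surfaces and, for FULL updates,
:  * construct a new context with the old planes swapped for the new ones and
:  * validate its bandwidth.
:  */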
2862 0 : static bool update_planes_and_stream_state(struct dc *dc,
2863 : struct dc_surface_update *srf_updates, int surface_count,
2864 : struct dc_stream_state *stream,
2865 : struct dc_stream_update *stream_update,
2866 : enum surface_update_type *new_update_type,
2867 : struct dc_state **new_context)
2868 : {
2869 : struct dc_state *context;
2870 : int i, j;
2871 : enum surface_update_type update_type;
2872 : const struct dc_stream_status *stream_status;
2873 0 : struct dc_context *dc_ctx = dc->ctx;
2874 :
2875 0 : stream_status = dc_stream_get_status(stream);
2876 :
2877 0 : if (!stream_status) {
2878 0 : if (surface_count) /* Only an error condition if surf_count non-zero*/
2879 0 : ASSERT(false);
2880 :
2881 : return false; /* Cannot commit surface to stream that is not committed */
2882 : }
2883 :
2884 0 : context = dc->current_state;
2885 :
2886 0 : update_type = dc_check_update_surfaces_for_stream(
2887 : dc, srf_updates, surface_count, stream_update, stream_status);
2888 :
2889 : /* update current stream with the new updates */
2890 0 : copy_stream_update_to_stream(dc, context, stream, stream_update);
2891 :
2892 : /* do not perform surface update if surface has invalid dimensions
2893 : * (all zero) and no scaling_info is provided
2894 : */
2895 0 : if (surface_count > 0) {
2896 0 : for (i = 0; i < surface_count; i++) {
2897 0 : if ((srf_updates[i].surface->src_rect.width == 0 ||
2898 0 : srf_updates[i].surface->src_rect.height == 0 ||
2899 0 : srf_updates[i].surface->dst_rect.width == 0 ||
2900 0 : srf_updates[i].surface->dst_rect.height == 0) &&
2901 0 : (!srf_updates[i].scaling_info ||
2902 0 : srf_updates[i].scaling_info->src_rect.width == 0 ||
2903 0 : srf_updates[i].scaling_info->src_rect.height == 0 ||
2904 0 : srf_updates[i].scaling_info->dst_rect.width == 0 ||
2905 0 : srf_updates[i].scaling_info->dst_rect.height == 0)) {
2906 0 : DC_ERROR("Invalid src/dst rects in surface update!\n");
2907 0 : return false;
2908 : }
2909 : }
2910 : }
2911 :
2912 0 : if (update_type >= update_surface_trace_level)
2913 0 : update_surface_trace(dc, srf_updates, surface_count);
2914 :
2915 0 : if (update_type >= UPDATE_TYPE_FULL) {
2916 0 : struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
2917 :
2918 0 : for (i = 0; i < surface_count; i++)
2919 0 : new_planes[i] = srf_updates[i].surface;
2920 :
2921 : /* initialize scratch memory for building context */
2922 0 : context = dc_create_state(dc);
2923 0 : if (context == NULL) {
2924 0 : DC_ERROR("Failed to allocate new validate context!\n");
2925 0 : return false;
2926 : }
2927 :
2928 0 : dc_resource_state_copy_construct(
2929 0 : dc->current_state, context);
2930 :
2931 : /*remove old surfaces from context */
2932 0 : if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
2933 :
2934 0 : BREAK_TO_DEBUGGER();
2935 0 : goto fail;
2936 : }
2937 :
2938 : /* add surface to context */
2939 0 : if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
2940 :
2941 0 : BREAK_TO_DEBUGGER();
2942 0 : goto fail;
2943 : }
2944 : }
2945 :
2946 : /* save update parameters into surface */
2947 0 : for (i = 0; i < surface_count; i++) {
2948 0 : struct dc_plane_state *surface = srf_updates[i].surface;
2949 :
2950 0 : copy_surface_update_to_plane(surface, &srf_updates[i]);
2951 :
2952 0 : if (update_type >= UPDATE_TYPE_MED) {
2953 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
2954 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2955 :
2956 0 : if (pipe_ctx->plane_state != surface)
2957 0 : continue;
2958 :
2959 0 : resource_build_scaling_params(pipe_ctx);
2960 : }
2961 : }
2962 : }
2963 :
2964 0 : if (update_type == UPDATE_TYPE_FULL) {
2965 0 : if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
2966 0 : BREAK_TO_DEBUGGER();
2967 0 : goto fail;
2968 : }
2969 : }
2970 :
2971 0 : *new_context = context;
2972 0 : *new_update_type = update_type;
2973 :
2974 0 : return true;
2975 :
2976 : fail:
2977 0 : dc_release_state(context);
2978 :
2979 0 : return false;
2980 :
2981 : }
2982 :
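: /* Apply the stream-level portion of an update on the ODM-head pipe(s) of
:  * @stream: periodic interrupts, info packets, CSC, dithering and, for
:  * non-FAST updates, DSC, MST payload bandwidth, test patterns, DPMS and ABM.
:  */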
2983 0 : static void commit_planes_do_stream_update(struct dc *dc,
2984 : struct dc_stream_state *stream,
2985 : struct dc_stream_update *stream_update,
2986 : enum surface_update_type update_type,
2987 : struct dc_state *context)
2988 : {
2989 : int j;
2990 :
2991 : // Stream updates
2992 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
2993 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2994 :
2995 0 : if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
2996 :
2997 0 : if (stream_update->periodic_interrupt0 &&
2998 0 : dc->hwss.setup_periodic_interrupt)
2999 0 : dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);
3000 :
3001 0 : if (stream_update->periodic_interrupt1 &&
3002 0 : dc->hwss.setup_periodic_interrupt)
3003 0 : dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);
3004 :
3005 0 : if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3006 0 : stream_update->vrr_infopacket ||
3007 0 : stream_update->vsc_infopacket ||
3008 0 : stream_update->vsp_infopacket ||
3009 0 : stream_update->hfvsif_infopacket ||
3010 0 : stream_update->vtem_infopacket) {
3011 0 : resource_build_info_frame(pipe_ctx);
3012 0 : dc->hwss.update_info_frame(pipe_ctx);
3013 :
3014 0 : if (dc_is_dp_signal(pipe_ctx->stream->signal))
3015 0 : dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3016 : }
3017 :
3018 0 : if (stream_update->hdr_static_metadata &&
3019 0 : stream->use_dynamic_meta &&
3020 0 : dc->hwss.set_dmdata_attributes &&
3021 0 : pipe_ctx->stream->dmdata_address.quad_part != 0)
3022 0 : dc->hwss.set_dmdata_attributes(pipe_ctx);
3023 :
3024 0 : if (stream_update->gamut_remap)
3025 : dc_stream_set_gamut_remap(dc, stream);
3026 :
3027 0 : if (stream_update->output_csc_transform)
3028 0 : dc_stream_program_csc_matrix(dc, stream);
3029 :
3030 0 : if (stream_update->dither_option) {
3031 0 : struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3032 0 : resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3033 0 : &pipe_ctx->stream->bit_depth_params);
3034 0 : pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3035 : &stream->bit_depth_params,
3036 : &stream->clamping);
3037 0 : while (odm_pipe) {
3038 0 : odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3039 : &stream->bit_depth_params,
3040 : &stream->clamping);
3041 0 : odm_pipe = odm_pipe->next_odm_pipe;
3042 : }
3043 : }
3044 :
3045 :
3046 : /* Full FE update */
3047 0 : if (update_type == UPDATE_TYPE_FAST)
3048 0 : continue;
3049 :
3050 0 : if (stream_update->dsc_config)
3051 0 : dp_update_dsc_config(pipe_ctx);
3052 :
3053 0 : if (stream_update->mst_bw_update) {
3054 0 : if (stream_update->mst_bw_update->is_increase)
3055 0 : dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3056 : else
3057 0 : dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3058 : }
3059 :
3060 0 : if (stream_update->pending_test_pattern) {
3061 0 : dc_link_dp_set_test_pattern(stream->link,
3062 : stream->test_pattern.type,
3063 : stream->test_pattern.color_space,
3064 : stream->test_pattern.p_link_settings,
3065 : stream->test_pattern.p_custom_pattern,
3066 : stream->test_pattern.cust_pattern_size);
3067 : }
3068 :
3069 0 : if (stream_update->dpms_off) {
3070 0 : if (*stream_update->dpms_off) {
3071 0 : if (dc->hwss.update_phy_state)
3072 0 : dc->hwss.update_phy_state(dc->current_state,
3073 : pipe_ctx, TX_OFF_SYMCLK_ON);
3074 : else
3075 0 : core_link_disable_stream(pipe_ctx);
3076 : /* for dpms, keep acquired resources*/
3077 0 : if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3078 0 : pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3079 :
3080 0 : dc->optimized_required = true;
3081 :
3082 : } else {
3083 0 : if (get_seamless_boot_stream_count(context) == 0)
3084 0 : dc->hwss.prepare_bandwidth(dc, dc->current_state);
3085 :
3086 0 : if (dc->hwss.update_phy_state)
3087 0 : dc->hwss.update_phy_state(dc->current_state,
3088 : pipe_ctx, TX_ON_SYMCLK_ON);
3089 : else
3090 0 : core_link_enable_stream(dc->current_state, pipe_ctx);
3091 : }
3092 : }
3093 :
3094 0 : if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3095 0 : bool should_program_abm = true;
3096 :
3097 : // if otg funcs defined check if blanked before programming
3098 0 : if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3099 0 : if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3100 0 : should_program_abm = false;
3101 :
3102 0 : if (should_program_abm) {
3103 0 : if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3104 0 : dc->hwss.set_abm_immediate_disable(pipe_ctx);
3105 : } else {
3106 0 : pipe_ctx->stream_res.abm->funcs->set_abm_level(
3107 : pipe_ctx->stream_res.abm, stream->abm_level);
3108 : }
3109 : }
3110 : }
3111 : }
3112 : }
3113 0 : }
3114 :
3115 : static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3116 : {
3117 0 : if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3118 : || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3119 0 : && stream->ctx->dce_version >= DCN_VERSION_3_1)
3120 : return true;
3121 :
3122 : return false;
3123 : }
3124 :
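: /* Send the per-plane dirty rectangles to DMUB, one command per matching pipe.
:  * Only applies when PSR (version 1 or SU) is active on a DCN3.1+ link, and is
:  * skipped for immediate flips.
:  */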
3125 0 : void dc_dmub_update_dirty_rect(struct dc *dc,
3126 : int surface_count,
3127 : struct dc_stream_state *stream,
3128 : struct dc_surface_update *srf_updates,
3129 : struct dc_state *context)
3130 : {
3131 : union dmub_rb_cmd cmd;
3132 0 : struct dc_context *dc_ctx = dc->ctx;
3133 : struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3134 : unsigned int i, j;
3135 0 : unsigned int panel_inst = 0;
3136 :
3137 0 : if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3138 0 : return;
3139 :
3140 0 : if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3141 : return;
3142 :
3143 0 : memset(&cmd, 0x0, sizeof(cmd));
3144 0 : cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3145 0 : cmd.update_dirty_rect.header.sub_type = 0;
3146 0 : cmd.update_dirty_rect.header.payload_bytes =
3147 : sizeof(cmd.update_dirty_rect) -
3148 : sizeof(cmd.update_dirty_rect.header);
3149 0 : update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3150 0 : for (i = 0; i < surface_count; i++) {
3151 0 : struct dc_plane_state *plane_state = srf_updates[i].surface;
3152 0 : const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3153 :
3154 0 : if (!srf_updates[i].surface || !flip_addr)
3155 0 : continue;
3156 : /* Do not send in immediate flip mode */
3157 0 : if (srf_updates[i].surface->flip_immediate)
3158 0 : continue;
3159 :
3160 0 : update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3161 0 : memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3162 : sizeof(flip_addr->dirty_rects));
3163 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
3164 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3165 :
3166 0 : if (pipe_ctx->stream != stream)
3167 0 : continue;
3168 0 : if (pipe_ctx->plane_state != plane_state)
3169 0 : continue;
3170 :
3171 0 : update_dirty_rect->panel_inst = panel_inst;
3172 0 : update_dirty_rect->pipe_idx = j;
3173 0 : dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
3174 0 : dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
3175 : }
3176 : }
3177 : }
3178 :
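: /* Program a validated update to the hardware: take the pipe/DMUB locks,
:  * apply phantom (SubVP) timings where required, run the stream-level update
:  * and then the per-surface programming for every affected pipe.
:  */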
3179 0 : static void commit_planes_for_stream(struct dc *dc,
3180 : struct dc_surface_update *srf_updates,
3181 : int surface_count,
3182 : struct dc_stream_state *stream,
3183 : struct dc_stream_update *stream_update,
3184 : enum surface_update_type update_type,
3185 : struct dc_state *context)
3186 : {
3187 : int i, j;
3188 0 : struct pipe_ctx *top_pipe_to_program = NULL;
3189 0 : bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3190 0 : bool subvp_prev_use = false;
3191 :
3192 : // Once we apply the new SubVP context to hardware it won't be in
3193 : // dc->current_state anymore, so we have to cache whether SubVP was in
3194 : // use before we apply the new context
3195 0 : subvp_prev_use = false;
3196 :
3197 :
3198 : dc_z10_restore(dc);
3199 :
3200 0 : if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3201 : /* The seamless boot optimization flag keeps clocks and watermarks high
3202 : * until the first flip. After the first flip, optimization must run to
3203 : * lower bandwidth. Note that UEFI is expected to light up only a single
3204 : * display on POST, so we only expect one stream with the seamless boot
3205 : * flag set.
3206 : */
3207 0 : if (stream->apply_seamless_boot_optimization) {
3208 0 : stream->apply_seamless_boot_optimization = false;
3209 :
3210 0 : if (get_seamless_boot_stream_count(context) == 0)
3211 0 : dc->optimized_required = true;
3212 : }
3213 : }
3214 :
3215 0 : if (update_type == UPDATE_TYPE_FULL) {
3216 0 : dc_allow_idle_optimizations(dc, false);
3217 :
3218 0 : if (get_seamless_boot_stream_count(context) == 0)
3219 0 : dc->hwss.prepare_bandwidth(dc, context);
3220 :
3221 0 : context_clock_trace(dc, context);
3222 : }
3223 :
3224 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
3225 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3226 :
3227 0 : if (!pipe_ctx->top_pipe &&
3228 0 : !pipe_ctx->prev_odm_pipe &&
3229 0 : pipe_ctx->stream &&
3230 : pipe_ctx->stream == stream) {
3231 0 : top_pipe_to_program = pipe_ctx;
3232 : }
3233 : }
3234 :
3235 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
3236 0 : struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3237 :
3238 : // Check old context for SubVP
3239 0 : subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
3240 0 : if (subvp_prev_use)
3241 : break;
3242 : }
3243 :
3244 0 : if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3245 : struct pipe_ctx *mpcc_pipe;
3246 : struct pipe_ctx *odm_pipe;
3247 :
3248 0 : for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3249 0 : for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3250 0 : odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3251 : }
3252 :
3253 0 : if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3254 0 : if (top_pipe_to_program &&
3255 0 : top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3256 0 : if (should_use_dmub_lock(stream->link)) {
3257 0 : union dmub_hw_lock_flags hw_locks = { 0 };
3258 0 : struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3259 :
3260 0 : hw_locks.bits.lock_dig = 1;
3261 0 : inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3262 :
3263 0 : dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3264 : true,
3265 : &hw_locks,
3266 : &inst_flags);
3267 : } else
3268 0 : top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3269 : top_pipe_to_program->stream_res.tg);
3270 : }
3271 :
3272 0 : if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3273 0 : if (dc->hwss.subvp_pipe_control_lock)
3274 0 : dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3275 0 : dc->hwss.interdependent_update_lock(dc, context, true);
3276 :
3277 : } else {
3278 0 : if (dc->hwss.subvp_pipe_control_lock)
3279 0 : dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3280 : /* Lock the top pipe while updating plane addrs, since freesync requires
3281 : * plane addr update event triggers to be synchronized.
3282 : * top_pipe_to_program is expected to never be NULL
3283 : */
3284 0 : dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3285 : }
3286 :
3287 0 : if (update_type != UPDATE_TYPE_FAST) {
3288 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
3289 0 : struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3290 :
3291 0 : if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
3292 : subvp_prev_use) {
3293 : // If old context or new context has phantom pipes, apply
3294 : // the phantom timings now. We can't change the phantom
3295 : // pipe configuration safely without driver acquiring
3296 : // the DMCUB lock first.
3297 0 : dc->hwss.apply_ctx_to_hw(dc, context);
3298 0 : break;
3299 : }
3300 : }
3301 : }
3302 :
3303 0 : dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3320 :
3321 : // Stream updates
3322 0 : if (stream_update)
3323 0 : commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3324 :
3325 0 : if (surface_count == 0) {
3326 : /*
3327 :		 * In case of turning off the screen, there is no need to program the
3328 :		 * front end a second time; just return after programming blank.
3329 : */
3330 0 : if (dc->hwss.apply_ctx_for_surface)
3331 0 : dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3332 0 : if (dc->hwss.program_front_end_for_ctx)
3333 0 : dc->hwss.program_front_end_for_ctx(dc, context);
3334 :
3335 0 : if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3336 0 : dc->hwss.interdependent_update_lock(dc, context, false);
3337 : } else {
3338 0 : dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3339 : }
3340 0 : dc->hwss.post_unlock_program_front_end(dc, context);
3341 :
3342 0 : if (update_type != UPDATE_TYPE_FAST)
3343 0 : if (dc->hwss.commit_subvp_config)
3344 0 : dc->hwss.commit_subvp_config(dc, context);
3345 :
3346 : /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3347 :		 * move the SubVP lock to after the phantom pipes have been set up
3348 : */
3349 0 : if (dc->hwss.subvp_pipe_control_lock)
3350 0 : dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3351 : return;
3352 : }
3353 :
3354 0 : if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
3355 0 : for (i = 0; i < surface_count; i++) {
3356 0 : struct dc_plane_state *plane_state = srf_updates[i].surface;
3357 : /*set logical flag for lock/unlock use*/
3358 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
3359 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3360 0 : if (!pipe_ctx->plane_state)
3361 0 : continue;
3362 0 : if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3363 0 : continue;
3364 0 : pipe_ctx->plane_state->triplebuffer_flips = false;
3365 0 : if (update_type == UPDATE_TYPE_FAST &&
3366 0 : dc->hwss.program_triplebuffer != NULL &&
3367 0 : !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3368 :					/* triple buffer for VUpdate only */
3369 0 : pipe_ctx->plane_state->triplebuffer_flips = true;
3370 : }
3371 : }
3372 0 : if (update_type == UPDATE_TYPE_FULL) {
3373 : /* force vsync flip when reconfiguring pipes to prevent underflow */
3374 0 : plane_state->flip_immediate = false;
3375 : }
3376 : }
3377 : }
3378 :
3379 : // Update Type FULL, Surface updates
3380 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
3381 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3382 :
3383 0 : if (!pipe_ctx->top_pipe &&
3384 0 : !pipe_ctx->prev_odm_pipe &&
3385 0 : should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3386 0 : struct dc_stream_status *stream_status = NULL;
3387 :
3388 0 : if (!pipe_ctx->plane_state)
3389 0 : continue;
3390 :
3391 :			/* Full front end update */
3392 0 : if (update_type == UPDATE_TYPE_FAST)
3393 0 : continue;
3394 :
3395 0 : ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3396 :
3397 0 : if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3398 :				/* turn off triple buffer for full update */
3399 0 : dc->hwss.program_triplebuffer(
3400 0 : dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3401 : }
3402 0 : stream_status =
3403 0 : stream_get_status(context, pipe_ctx->stream);
3404 :
3405 0 : if (dc->hwss.apply_ctx_for_surface)
3406 0 : dc->hwss.apply_ctx_for_surface(
3407 : dc, pipe_ctx->stream, stream_status->plane_count, context);
3408 : }
3409 : }
3410 0 : if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3411 0 : dc->hwss.program_front_end_for_ctx(dc, context);
3412 0 : if (dc->debug.validate_dml_output) {
3413 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
3414 0 : struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3415 0 : if (cur_pipe->stream == NULL)
3416 0 : continue;
3417 :
3418 0 : cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3419 : cur_pipe->plane_res.hubp, dc->ctx,
3420 : &context->res_ctx.pipe_ctx[i].rq_regs,
3421 : &context->res_ctx.pipe_ctx[i].dlg_regs,
3422 : &context->res_ctx.pipe_ctx[i].ttu_regs);
3423 : }
3424 : }
3425 : }
3426 :
3427 : // Update Type FAST, Surface updates
3428 0 : if (update_type == UPDATE_TYPE_FAST) {
3429 0 : if (dc->hwss.set_flip_control_gsl)
3430 0 : for (i = 0; i < surface_count; i++) {
3431 0 : struct dc_plane_state *plane_state = srf_updates[i].surface;
3432 :
3433 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
3434 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3435 :
3436 0 : if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3437 0 : continue;
3438 :
3439 0 : if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3440 0 : continue;
3441 :
3442 : // GSL has to be used for flip immediate
3443 0 : dc->hwss.set_flip_control_gsl(pipe_ctx,
3444 0 : pipe_ctx->plane_state->flip_immediate);
3445 : }
3446 : }
3447 :
3448 : /* Perform requested Updates */
3449 0 : for (i = 0; i < surface_count; i++) {
3450 0 : struct dc_plane_state *plane_state = srf_updates[i].surface;
3451 :
3452 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
3453 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3454 :
3455 0 : if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3456 0 : continue;
3457 :
3458 0 : if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3459 0 : continue;
3460 :
3461 : /*program triple buffer after lock based on flip type*/
3462 0 : if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3463 : /*only enable triplebuffer for fast_update*/
3464 0 : dc->hwss.program_triplebuffer(
3465 0 : dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3466 : }
3467 0 : if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3468 0 : dc->hwss.update_plane_addr(dc, pipe_ctx);
3469 : }
3470 : }
3471 :
3472 : }
3473 :
3474 0 : if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3475 0 : dc->hwss.interdependent_update_lock(dc, context, false);
3476 : } else {
3477 0 : dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3478 : }
3479 :
3480 0 : if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3481 0 : if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3482 0 : top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3483 : top_pipe_to_program->stream_res.tg,
3484 : CRTC_STATE_VACTIVE);
3485 0 : top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3486 : top_pipe_to_program->stream_res.tg,
3487 : CRTC_STATE_VBLANK);
3488 0 : top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3489 : top_pipe_to_program->stream_res.tg,
3490 : CRTC_STATE_VACTIVE);
3491 :
3492 0 : if (should_use_dmub_lock(stream->link)) {
3493 0 : union dmub_hw_lock_flags hw_locks = { 0 };
3494 0 : struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3495 :
3496 0 : hw_locks.bits.lock_dig = 1;
3497 0 : inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3498 :
3499 0 : dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3500 : false,
3501 : &hw_locks,
3502 : &inst_flags);
3503 : } else
3504 0 : top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3505 : top_pipe_to_program->stream_res.tg);
3506 : }
3507 :
3508 0 : if (update_type != UPDATE_TYPE_FAST)
3509 0 : dc->hwss.post_unlock_program_front_end(dc, context);
3510 :
3511 0 : if (update_type != UPDATE_TYPE_FAST)
3512 0 : if (dc->hwss.commit_subvp_config)
3513 0 : dc->hwss.commit_subvp_config(dc, context);
3514 :
3515 : /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3516 :	 * move the SubVP lock to after the phantom pipes have been set up
3517 : */
3518 0 : if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3519 0 : if (dc->hwss.subvp_pipe_control_lock)
3520 0 : dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3521 : } else {
3522 0 : if (dc->hwss.subvp_pipe_control_lock)
3523 0 : dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3524 : }
3525 :
3526 : // Fire manual trigger only when bottom plane is flipped
3527 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
3528 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3529 :
3530 0 : if (!pipe_ctx->plane_state)
3531 0 : continue;
3532 :
3533 0 : if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3534 0 : !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3535 0 : !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3536 0 : pipe_ctx->plane_state->skip_manual_trigger)
3537 0 : continue;
3538 :
3539 0 : if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3540 0 : pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3541 : }
3542 : }
3543 :
3544 0 : static bool commit_minimal_transition_state(struct dc *dc,
3545 : struct dc_state *transition_base_context)
3546 : {
3547 0 : struct dc_state *transition_context = dc_create_state(dc);
3548 : enum pipe_split_policy tmp_policy;
3549 0 : enum dc_status ret = DC_ERROR_UNEXPECTED;
3550 : unsigned int i, j;
3551 :
3552 0 : if (!transition_context)
3553 : return false;
3554 :
3555 0 : if (!dc->config.is_vmin_only_asic) {
3556 0 : tmp_policy = dc->debug.pipe_split_policy;
3557 0 : dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
3558 : }
3559 :
3560 0 : dc_resource_state_copy_construct(transition_base_context, transition_context);
3561 :
3562 : //commit minimal state
3563 0 : if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
3564 0 : for (i = 0; i < transition_context->stream_count; i++) {
3565 : struct dc_stream_status *stream_status = &transition_context->stream_status[i];
3566 :
3567 0 : for (j = 0; j < stream_status->plane_count; j++) {
3568 0 : struct dc_plane_state *plane_state = stream_status->plane_states[j];
3569 :
3570 : /* force vsync flip when reconfiguring pipes to prevent underflow
3571 : * and corruption
3572 : */
3573 0 : plane_state->flip_immediate = false;
3574 : }
3575 : }
3576 :
3577 0 : ret = dc_commit_state_no_check(dc, transition_context);
3578 : }
3579 :
3580 :	// always release, as dc_commit_state_no_check retains it in the good case
3581 0 : dc_release_state(transition_context);
3582 :
3583 : //restore previous pipe split policy
3584 0 : if (!dc->config.is_vmin_only_asic)
3585 0 : dc->debug.pipe_split_policy = tmp_policy;
3586 :
3587 0 : if (ret != DC_OK) {
3588 : //this should never happen
3589 0 : BREAK_TO_DEBUGGER();
3590 0 : return false;
3591 : }
3592 :
3593 : //force full surface update
3594 0 : for (i = 0; i < dc->current_state->stream_count; i++) {
3595 0 : for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
3596 0 : dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
3597 : }
3598 : }
3599 :
3600 : return true;
3601 : }
3602 :
3603 0 : bool dc_update_planes_and_stream(struct dc *dc,
3604 : struct dc_surface_update *srf_updates, int surface_count,
3605 : struct dc_stream_state *stream,
3606 : struct dc_stream_update *stream_update)
3607 : {
3608 : struct dc_state *context;
3609 : enum surface_update_type update_type;
3610 : int i;
3611 :
3612 :	/* In cases where MPO and split or ODM are used, transitions can
3613 : * cause underflow. Apply stream configuration with minimal pipe
3614 : * split first to avoid unsupported transitions for active pipes.
3615 : */
3616 0 : bool force_minimal_pipe_splitting = false;
3617 0 : bool is_plane_addition = false;
3618 :
3619 0 : struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3620 :
3621 0 : if (cur_stream_status &&
3622 0 : dc->current_state->stream_count > 0 &&
3623 0 : dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3624 : /* determine if minimal transition is required */
3625 0 : if (cur_stream_status->plane_count > surface_count) {
3626 : force_minimal_pipe_splitting = true;
3627 0 : } else if (cur_stream_status->plane_count < surface_count) {
3628 0 : force_minimal_pipe_splitting = true;
3629 0 : is_plane_addition = true;
3630 : }
3631 : }
3632 :
3633 : /* on plane addition, minimal state is the current one */
3634 0 : if (force_minimal_pipe_splitting && is_plane_addition &&
3635 0 : !commit_minimal_transition_state(dc, dc->current_state))
3636 : return false;
3637 :
3638 0 : if (!update_planes_and_stream_state(
3639 : dc,
3640 : srf_updates,
3641 : surface_count,
3642 : stream,
3643 : stream_update,
3644 : &update_type,
3645 : &context))
3646 : return false;
3647 :
3648 : /* on plane addition, minimal state is the new one */
3649 0 : if (force_minimal_pipe_splitting && !is_plane_addition) {
3650 0 : if (!commit_minimal_transition_state(dc, context)) {
3651 0 : dc_release_state(context);
3652 0 : return false;
3653 : }
3654 :
3655 0 : update_type = UPDATE_TYPE_FULL;
3656 : }
3657 :
3658 0 : commit_planes_for_stream(
3659 : dc,
3660 : srf_updates,
3661 : surface_count,
3662 : stream,
3663 : stream_update,
3664 : update_type,
3665 : context);
3666 :
3667 0 : if (dc->current_state != context) {
3668 :
3669 : /* Since memory free requires elevated IRQL, an interrupt
3670 : * request is generated by mem free. If this happens
3671 : * between freeing and reassigning the context, our vsync
3672 : * interrupt will call into dc and cause a memory
3673 : * corruption BSOD. Hence, we first reassign the context,
3674 : * then free the old context.
3675 : */
3676 :
3677 0 : struct dc_state *old = dc->current_state;
3678 :
3679 0 : dc->current_state = context;
3680 0 : dc_release_state(old);
3681 :
3682 : // clear any forced full updates
3683 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
3684 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3685 :
3686 0 : if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3687 0 : pipe_ctx->plane_state->force_full_update = false;
3688 : }
3689 : }
3690 : return true;
3691 : }
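/*
 * Editor's note: a minimal sketch (not part of dc) of the swap-then-release
 * ordering described in the comment above. It generalizes to any state
 * pointer that an interrupt handler may dereference; the helper name is
 * hypothetical.
 */
#if 0	/* illustrative only */
static void swap_state_then_release(struct dc *dc, struct dc_state *context)
{
	struct dc_state *old = dc->current_state;

	dc->current_state = context;	/* vsync IRQ now sees the new context */
	dc_release_state(old);		/* safe: 'old' is no longer reachable */
}
#endif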
3692 :
3693 0 : void dc_commit_updates_for_stream(struct dc *dc,
3694 : struct dc_surface_update *srf_updates,
3695 : int surface_count,
3696 : struct dc_stream_state *stream,
3697 : struct dc_stream_update *stream_update,
3698 : struct dc_state *state)
3699 : {
3700 : const struct dc_stream_status *stream_status;
3701 : enum surface_update_type update_type;
3702 : struct dc_state *context;
3703 0 : struct dc_context *dc_ctx = dc->ctx;
3704 : int i, j;
3705 :
3706 0 : stream_status = dc_stream_get_status(stream);
3707 0 : context = dc->current_state;
3708 :
3709 0 : update_type = dc_check_update_surfaces_for_stream(
3710 : dc, srf_updates, surface_count, stream_update, stream_status);
3711 :
3712 0 : if (update_type >= update_surface_trace_level)
3713 0 : update_surface_trace(dc, srf_updates, surface_count);
3714 :
3715 :
3716 0 : if (update_type >= UPDATE_TYPE_FULL) {
3717 :
3718 : /* initialize scratch memory for building context */
3719 0 : context = dc_create_state(dc);
3720 0 : if (context == NULL) {
3721 0 : DC_ERROR("Failed to allocate new validate context!\n");
3722 0 : return;
3723 : }
3724 :
3725 0 : dc_resource_state_copy_construct(state, context);
3726 :
3727 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
3728 0 : struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3729 0 : struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3730 :
3731 0 : if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
3732 0 : new_pipe->plane_state->force_full_update = true;
3733 : }
3734 0 : } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
3735 : /*
3736 : * Previous frame finished and HW is ready for optimization.
3737 : *
3738 : * Only relevant for DCN behavior where we can guarantee the optimization
3739 : * is safe to apply - retain the legacy behavior for DCE.
3740 : */
3741 0 : dc_post_update_surfaces_to_stream(dc);
3742 : }
3743 :
3744 :
3745 0 : for (i = 0; i < surface_count; i++) {
3746 0 : struct dc_plane_state *surface = srf_updates[i].surface;
3747 :
3748 0 : copy_surface_update_to_plane(surface, &srf_updates[i]);
3749 :
3750 0 : if (update_type >= UPDATE_TYPE_MED) {
3751 0 : for (j = 0; j < dc->res_pool->pipe_count; j++) {
3752 0 : struct pipe_ctx *pipe_ctx =
3753 : &context->res_ctx.pipe_ctx[j];
3754 :
3755 0 : if (pipe_ctx->plane_state != surface)
3756 0 : continue;
3757 :
3758 0 : resource_build_scaling_params(pipe_ctx);
3759 : }
3760 : }
3761 : }
3762 :
3763 0 : copy_stream_update_to_stream(dc, context, stream, stream_update);
3764 :
3765 0 : if (update_type >= UPDATE_TYPE_FULL) {
3766 0 : if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3767 0 : DC_ERROR("Mode validation failed for stream update!\n");
3768 : dc_release_state(context);
3769 : return;
3770 : }
3771 : }
3772 :
3773 : TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
3774 :
3775 0 : commit_planes_for_stream(
3776 : dc,
3777 : srf_updates,
3778 : surface_count,
3779 : stream,
3780 : stream_update,
3781 : update_type,
3782 : context);
3783 :	/* update current_state */
3784 0 : if (dc->current_state != context) {
3785 :
3786 0 : struct dc_state *old = dc->current_state;
3787 :
3788 0 : dc->current_state = context;
3789 0 : dc_release_state(old);
3790 :
3791 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
3792 0 : struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3793 :
3794 0 : if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3795 0 : pipe_ctx->plane_state->force_full_update = false;
3796 : }
3797 : }
3798 :
3799 : /* Legacy optimization path for DCE. */
3800 0 : if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
3801 0 : dc_post_update_surfaces_to_stream(dc);
3802 0 : TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
3803 : }
3804 :
3805 : return;
3806 :
3807 : }
3808 :
3809 0 : uint8_t dc_get_current_stream_count(struct dc *dc)
3810 : {
3811 0 : return dc->current_state->stream_count;
3812 : }
3813 :
3814 0 : struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3815 : {
3816 0 : if (i < dc->current_state->stream_count)
3817 0 : return dc->current_state->streams[i];
3818 : return NULL;
3819 : }
3820 :
3821 0 : enum dc_irq_source dc_interrupt_to_irq_source(
3822 : struct dc *dc,
3823 : uint32_t src_id,
3824 : uint32_t ext_id)
3825 : {
3826 0 : return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3827 : }
3828 :
3829 : /*
3830 : * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3831 : */
3832 0 : bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3833 : {
3834 :
3835 0 : if (dc == NULL)
3836 : return false;
3837 :
3838 0 : return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3839 : }
3840 :
3841 0 : void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3842 : {
3843 0 : dal_irq_service_ack(dc->res_pool->irqs, src);
3844 0 : }
3845 :
3846 0 : void dc_power_down_on_boot(struct dc *dc)
3847 : {
3848 0 : if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3849 0 : dc->hwss.power_down_on_boot)
3850 0 : dc->hwss.power_down_on_boot(dc);
3851 0 : }
3852 :
3853 0 : void dc_set_power_state(
3854 : struct dc *dc,
3855 : enum dc_acpi_cm_power_state power_state)
3856 : {
3857 : struct kref refcount;
3858 : struct display_mode_lib *dml;
3859 :
3860 0 : if (!dc->current_state)
3861 : return;
3862 :
3863 0 : switch (power_state) {
3864 : case DC_ACPI_CM_POWER_STATE_D0:
3865 0 : dc_resource_state_construct(dc, dc->current_state);
3866 :
3867 0 : dc_z10_restore(dc);
3868 :
3869 0 : if (dc->ctx->dmub_srv)
3870 0 : dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3871 :
3872 0 : dc->hwss.init_hw(dc);
3873 :
3874 0 : if (dc->hwss.init_sys_ctx != NULL &&
3875 0 : dc->vm_pa_config.valid) {
3876 0 : dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3877 : }
3878 :
3879 : break;
3880 : default:
3881 0 : ASSERT(dc->current_state->stream_count == 0);
3882 : /* Zero out the current context so that on resume we start with
3883 : * clean state, and dc hw programming optimizations will not
3884 : * cause any trouble.
3885 : */
3886 0 : dml = kzalloc(sizeof(struct display_mode_lib),
3887 : GFP_KERNEL);
3888 :
3889 0 : ASSERT(dml);
3890 0 : if (!dml)
3891 : return;
3892 :
3893 : /* Preserve refcount */
3894 0 : refcount = dc->current_state->refcount;
3895 : /* Preserve display mode lib */
3896 0 : memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3897 :
3898 0 : dc_resource_state_destruct(dc->current_state);
3899 0 : memset(dc->current_state, 0,
3900 : sizeof(*dc->current_state));
3901 :
3902 0 : dc->current_state->refcount = refcount;
3903 0 : dc->current_state->bw_ctx.dml = *dml;
3904 :
3905 0 : kfree(dml);
3906 :
3907 0 : break;
3908 : }
3909 : }
3910 :
3911 0 : void dc_resume(struct dc *dc)
3912 : {
3913 : uint32_t i;
3914 :
3915 0 : for (i = 0; i < dc->link_count; i++)
3916 0 : core_link_resume(dc->links[i]);
3917 0 : }
3918 :
3919 0 : bool dc_is_dmcu_initialized(struct dc *dc)
3920 : {
3921 0 : struct dmcu *dmcu = dc->res_pool->dmcu;
3922 :
3923 0 : if (dmcu)
3924 0 : return dmcu->funcs->is_dmcu_initialized(dmcu);
3925 : return false;
3926 : }
3927 :
3928 0 : bool dc_is_oem_i2c_device_present(
3929 : struct dc *dc,
3930 : size_t slave_address)
3931 : {
3932 0 : if (dc->res_pool->oem_device)
3933 0 : return dce_i2c_oem_device_present(
3934 : dc->res_pool,
3935 : dc->res_pool->oem_device,
3936 : slave_address);
3937 :
3938 : return false;
3939 : }
3940 :
3941 0 : bool dc_submit_i2c(
3942 : struct dc *dc,
3943 : uint32_t link_index,
3944 : struct i2c_command *cmd)
3945 : {
3946 :
3947 0 : struct dc_link *link = dc->links[link_index];
3948 0 : struct ddc_service *ddc = link->ddc;
3949 0 : return dce_i2c_submit_command(
3950 : dc->res_pool,
3951 : ddc->ddc_pin,
3952 : cmd);
3953 : }
3954 :
3955 0 : bool dc_submit_i2c_oem(
3956 : struct dc *dc,
3957 : struct i2c_command *cmd)
3958 : {
3959 0 : struct ddc_service *ddc = dc->res_pool->oem_device;
3960 0 : if (ddc)
3961 0 : return dce_i2c_submit_command(
3962 : dc->res_pool,
3963 : ddc->ddc_pin,
3964 : cmd);
3965 :
3966 : return false;
3967 : }
3968 :
3969 0 : static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3970 : {
3971 0 : if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3972 0 : BREAK_TO_DEBUGGER();
3973 0 : return false;
3974 : }
3975 :
3976 0 : dc_sink_retain(sink);
3977 :
3978 0 : dc_link->remote_sinks[dc_link->sink_count] = sink;
3979 0 : dc_link->sink_count++;
3980 :
3981 0 : return true;
3982 : }
3983 :
3984 : /*
3985 : * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
3986 : *
3987 : * EDID length is in bytes
3988 : */
3989 0 : struct dc_sink *dc_link_add_remote_sink(
3990 : struct dc_link *link,
3991 : const uint8_t *edid,
3992 : int len,
3993 : struct dc_sink_init_data *init_data)
3994 : {
3995 : struct dc_sink *dc_sink;
3996 : enum dc_edid_status edid_status;
3997 :
3998 0 : if (len > DC_MAX_EDID_BUFFER_SIZE) {
3999 0 : dm_error("Max EDID buffer size breached!\n");
4000 0 : return NULL;
4001 : }
4002 :
4003 0 : if (!init_data) {
4004 0 : BREAK_TO_DEBUGGER();
4005 0 : return NULL;
4006 : }
4007 :
4008 0 : if (!init_data->link) {
4009 0 : BREAK_TO_DEBUGGER();
4010 0 : return NULL;
4011 : }
4012 :
4013 0 : dc_sink = dc_sink_create(init_data);
4014 :
4015 0 : if (!dc_sink)
4016 : return NULL;
4017 :
4018 0 : memmove(dc_sink->dc_edid.raw_edid, edid, len);
4019 0 : dc_sink->dc_edid.length = len;
4020 :
4021 0 : if (!link_add_remote_sink_helper(
4022 : link,
4023 : dc_sink))
4024 : goto fail_add_sink;
4025 :
4026 0 : edid_status = dm_helpers_parse_edid_caps(
4027 : link,
4028 0 : &dc_sink->dc_edid,
4029 : &dc_sink->edid_caps);
4030 :
4031 : /*
4032 :	 * Treat the device as an EDID-less device if EDID
4033 :	 * parsing fails.
4034 : */
4035 0 : if (edid_status != EDID_OK) {
4036 0 : dc_sink->dc_edid.length = 0;
4037    0 : 		dm_error("Bad EDID, status %d!\n", edid_status);
4038 : }
4039 :
4040 : return dc_sink;
4041 :
4042 : fail_add_sink:
4043 0 : dc_sink_release(dc_sink);
4044 0 : return NULL;
4045 : }
4046 :
4047 : /*
4048 : * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
4049 : *
4050 : * Note that this just removes the struct dc_sink - it doesn't
4051 : * program hardware or alter other members of dc_link
4052 : */
4053 0 : void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
4054 : {
4055 : int i;
4056 :
4057 0 : if (!link->sink_count) {
4058 0 : BREAK_TO_DEBUGGER();
4059 0 : return;
4060 : }
4061 :
4062 0 : for (i = 0; i < link->sink_count; i++) {
4063 0 : if (link->remote_sinks[i] == sink) {
4064 0 : dc_sink_release(sink);
4065 0 : link->remote_sinks[i] = NULL;
4066 :
4067 : /* shrink array to remove empty place */
4068 0 : while (i < link->sink_count - 1) {
4069 0 : link->remote_sinks[i] = link->remote_sinks[i+1];
4070 0 : i++;
4071 : }
4072 0 : link->remote_sinks[i] = NULL;
4073 0 : link->sink_count--;
4074 0 : return;
4075 : }
4076 : }
4077 : }
4078 :
4079 0 : void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4080 : {
4081 0 : info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4082 0 : info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4083 0 : info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4084 0 : info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4085 0 : info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4086 0 : info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4087 0 : info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4088 0 : info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4089 0 : info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4090 0 : }
4091 0 : enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4092 : {
4093 0 : if (dc->hwss.set_clock)
4094 0 : return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4095 : return DC_ERROR_UNEXPECTED;
4096 : }
4097 0 : void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4098 : {
4099 0 : if (dc->hwss.get_clock)
4100 0 : dc->hwss.get_clock(dc, clock_type, clock_cfg);
4101 0 : }
4102 :
4103 : /* Enable/disable eDP PSR without specifying a stream for eDP */
4104 0 : bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4105 : {
4106 : int i;
4107 : bool allow_active;
4108 :
4109 0 : for (i = 0; i < dc->current_state->stream_count ; i++) {
4110 : struct dc_link *link;
4111 0 : struct dc_stream_state *stream = dc->current_state->streams[i];
4112 :
4113 0 : link = stream->link;
4114 0 : if (!link)
4115 0 : continue;
4116 :
4117 0 : if (link->psr_settings.psr_feature_enabled) {
4118 0 : if (enable && !link->psr_settings.psr_allow_active) {
4119 0 : allow_active = true;
4120 0 : if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4121 : return false;
4122 0 : } else if (!enable && link->psr_settings.psr_allow_active) {
4123 0 : allow_active = false;
4124 0 : if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4125 : return false;
4126 : }
4127 : }
4128 : }
4129 :
4130 : return true;
4131 : }
4132 :
4133 0 : void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4134 : {
4135 0 : if (dc->debug.disable_idle_power_optimizations)
4136 : return;
4137 :
4138 0 : if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4139 0 : if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4140 : return;
4141 :
4142 0 : if (allow == dc->idle_optimizations_allowed)
4143 : return;
4144 :
4145 0 : if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4146 0 : dc->idle_optimizations_allowed = allow;
4147 : }
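/*
 * Editor's note: a hedged usage sketch (not part of dc). Since the function
 * early-returns when 'allow' already matches, a DM can safely bracket a HW
 * programming sequence with it; the helper name is hypothetical.
 */
#if 0	/* illustrative only */
static void dm_program_with_idle_opt_disabled(struct dc *dc)
{
	dc_allow_idle_optimizations(dc, false);	/* leave idle states first */
	/* ... program planes/streams here ... */
	dc_allow_idle_optimizations(dc, true);	/* allow idle states again */
}
#endif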
4148 :
4149 : /* set min and max memory clock to lowest and highest DPM level, respectively */
4150 0 : void dc_unlock_memory_clock_frequency(struct dc *dc)
4151 : {
4152 0 : if (dc->clk_mgr->funcs->set_hard_min_memclk)
4153 0 : dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4154 :
4155 0 : if (dc->clk_mgr->funcs->set_hard_max_memclk)
4156 0 : dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4157 0 : }
4158 :
4159 : /* set min memory clock to the min required for current mode, max to maxDPM */
4160 0 : void dc_lock_memory_clock_frequency(struct dc *dc)
4161 : {
4162 0 : if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4163 0 : dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4164 :
4165 0 : if (dc->clk_mgr->funcs->set_hard_min_memclk)
4166 0 : dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4167 :
4168 0 : if (dc->clk_mgr->funcs->set_hard_max_memclk)
4169 0 : dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4170 0 : }
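/*
 * Editor's note: an illustrative pairing (not part of dc) of the two memclk
 * helpers above, assuming work that must keep memclk at or above the current
 * mode's minimum; the caller name is hypothetical.
 */
#if 0	/* illustrative only */
static void dm_do_memclk_sensitive_work(struct dc *dc)
{
	dc_lock_memory_clock_frequency(dc);	/* pin min memclk to current mode */
	/* ... work that relies on the pinned minimum ... */
	dc_unlock_memory_clock_frequency(dc);	/* restore full DPM range */
}
#endif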
4171 :
4172 0 : static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4173 : {
4174 0 : struct dc_state *context = dc->current_state;
4175 : struct hubp *hubp;
4176 : struct pipe_ctx *pipe;
4177 : int i;
4178 :
4179 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
4180 0 : pipe = &context->res_ctx.pipe_ctx[i];
4181 :
4182 0 : if (pipe->stream != NULL) {
4183 0 : dc->hwss.disable_pixel_data(dc, pipe, true);
4184 :
4185 : // wait for double buffer
4186 0 : pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4187 0 : pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4188 0 : pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4189 :
4190 0 : hubp = pipe->plane_res.hubp;
4191 0 : hubp->funcs->set_blank_regs(hubp, true);
4192 : }
4193 : }
4194 :
4195 0 : dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4196 0 : dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4197 :
4198 0 : for (i = 0; i < dc->res_pool->pipe_count; i++) {
4199 0 : pipe = &context->res_ctx.pipe_ctx[i];
4200 :
4201 0 : if (pipe->stream != NULL) {
4202 0 : dc->hwss.disable_pixel_data(dc, pipe, false);
4203 :
4204 0 : hubp = pipe->plane_res.hubp;
4205 0 : hubp->funcs->set_blank_regs(hubp, false);
4206 : }
4207 : }
4208 0 : }
4209 :
4210 :
4211 : /**
4212 : * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
4213 : * @dc: pointer to dc of the dm calling this
4214 : * @enable: True = transition to DC mode, false = transition back to AC mode
4215 : *
4216 :  * Some SoCs define additional clock limits when in DC mode; DM should
4217 :  * invoke this function when the platform undergoes a power source transition
4218 :  * so DC can apply or remove the limit. This interface may be disruptive to
4219 :  * the onscreen content.
4220 :  *
4221 :  * Context: Triggered by OS through DM interface, or manually by escape calls.
4222 :  * Need to hold a dc lock when doing so.
4223 : *
4224 : * Return: none (void function)
4225 : *
4226 : */
4227 0 : void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
4228 : {
4229 0 : uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
4230 : unsigned int softMax, maxDPM, funcMin;
4231 : bool p_state_change_support;
4232 :
4233 0 : if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
4234 : return;
4235 :
4236 0 : softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
4237 0 : maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
4238 0 : funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
4239 0 : p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
4240 :
4241 0 : if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
4242 0 : if (p_state_change_support) {
4243 0 : if (funcMin <= softMax)
4244 0 : dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
4245 : // else: No-Op
4246 : } else {
4247 0 : if (funcMin <= softMax)
4248 0 : blank_and_force_memclk(dc, true, softMax);
4249 : // else: No-Op
4250 : }
4251 0 : } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
4252 0 : if (p_state_change_support) {
4253 0 : if (funcMin <= softMax)
4254 0 : dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
4255 : // else: No-Op
4256 : } else {
4257 0 : if (funcMin <= softMax)
4258 0 : blank_and_force_memclk(dc, true, maxDPM);
4259 : // else: No-Op
4260 : }
4261 : }
4262 0 : dc->clk_mgr->dc_mode_softmax_enabled = enable;
4263 : }
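/*
 * Editor's note: a hedged sketch (not part of dc) of the call site implied by
 * the kernel-doc above; the DM-side handler name is hypothetical.
 */
#if 0	/* illustrative only */
static void dm_on_power_source_change(struct dc *dc, bool now_on_battery)
{
	/* true applies the DC-mode soft-max; false restores the AC limits */
	dc_enable_dcmode_clk_limit(dc, now_on_battery);
}
#endif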
4264 0 : bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
4265 : struct dc_cursor_attributes *cursor_attr)
4266 : {
4267 0 : if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
4268 : return true;
4269 : return false;
4270 : }
4271 :
4272 : /* cleanup on driver unload */
4273 0 : void dc_hardware_release(struct dc *dc)
4274 : {
4275 0 : dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
4276 :
4277 0 : if (dc->hwss.hardware_release)
4278 0 : dc->hwss.hardware_release(dc);
4279 0 : }
4280 :
4281 0 : void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
4282 : {
4283 0 : if (dc->current_state)
4284 0 : dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
4285 0 : }
4286 :
4287 : /*
4288 : *****************************************************************************
4289 :  * Function: dc_is_dmub_outbox_supported
4290 :  *
4291 :  * @brief
4292 :  * Checks whether the DMUB FW supports outbox notifications. If supported,
4293 :  * DM should register the outbox interrupt prior to actually enabling
4294 :  * interrupts via dc_enable_dmub_outbox.
4295 : *
4296 : * @param
4297 : * [in] dc: dc structure
4298 : *
4299 : * @return
4300 : * True if DMUB FW supports outbox notifications, False otherwise
4301 : *****************************************************************************
4302 : */
4303 0 : bool dc_is_dmub_outbox_supported(struct dc *dc)
4304 : {
4305 : /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
4306 0 : if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
4307 0 : dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
4308 0 : !dc->debug.dpia_debug.bits.disable_dpia)
4309 : return true;
4310 :
4311 0 : if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
4312 0 : !dc->debug.dpia_debug.bits.disable_dpia)
4313 : return true;
4314 :
4315 : /* dmub aux needs dmub notifications to be enabled */
4316 0 : return dc->debug.enable_dmub_aux_for_legacy_ddc;
4317 : }
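/*
 * Editor's note: a sketch (not part of dc) of the DM-side ordering required
 * by the comment above - register the outbox interrupt handler before
 * enabling notifications. dm_register_outbox_irq() is a hypothetical name.
 */
#if 0	/* illustrative only */
static void dm_init_dmub_outbox(struct dc *dc)
{
	if (!dc_is_dmub_outbox_supported(dc))
		return;

	dm_register_outbox_irq();	/* hypothetical: hook the outbox IRQ */
	dc_enable_dmub_outbox(dc);	/* only then let DMUB raise notifications */
}
#endif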
4318 :
4319 : /*
4320 : *****************************************************************************
4321 : * Function: dc_enable_dmub_notifications
4322 : *
4323 : * @brief
4324 : * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
4325 : * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.
4326 : * This API shall be removed after switching.
4327 : *
4328 : * @param
4329 : * [in] dc: dc structure
4330 : *
4331 : * @return
4332 : * True if DMUB FW supports outbox notifications, False otherwise
4333 : *****************************************************************************
4334 : */
4335 0 : bool dc_enable_dmub_notifications(struct dc *dc)
4336 : {
4337 0 : return dc_is_dmub_outbox_supported(dc);
4338 : }
4339 :
4340 : /**
4341 : *****************************************************************************
4342 : * Function: dc_enable_dmub_outbox
4343 : *
4344 : * @brief
4345 : * Enables DMUB unsolicited notifications to x86 via outbox
4346 : *
4347 : * @param
4348 : * [in] dc: dc structure
4349 : *
4350 : * @return
4351 : * None
4352 : *****************************************************************************
4353 : */
4354 0 : void dc_enable_dmub_outbox(struct dc *dc)
4355 : {
4356 0 : struct dc_context *dc_ctx = dc->ctx;
4357 :
4358 0 : dmub_enable_outbox_notification(dc_ctx->dmub_srv);
4359 0 : DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
4360 0 : }
4361 :
4362 : /**
4363 :  * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message.
4364 :  * Sets the port index appropriately for legacy DDC.
4365 : * @dc: dc structure
4366 : * @link_index: link index
4367 : * @payload: aux payload
4368 : *
4369 : * Returns: True if successful, False if failure
4370 : */
4371 0 : bool dc_process_dmub_aux_transfer_async(struct dc *dc,
4372 : uint32_t link_index,
4373 : struct aux_payload *payload)
4374 : {
4375 : uint8_t action;
4376 0 : union dmub_rb_cmd cmd = {0};
4377 0 : struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4378 :
4379 0 : ASSERT(payload->length <= 16);
4380 :
4381 0 : cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
4382 0 : cmd.dp_aux_access.header.payload_bytes = 0;
4383 : /* For dpia, ddc_pin is set to NULL */
4384 0 : if (!dc->links[link_index]->ddc->ddc_pin)
4385 0 : cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
4386 : else
4387 0 : cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
4388 :
4389 0 : cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
4390 0 : cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
4391 0 : cmd.dp_aux_access.aux_control.timeout = 0;
4392 0 : cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
4393 0 : cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
4394 0 : cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
4395 :
4396 : /* set aux action */
4397 0 : if (payload->i2c_over_aux) {
4398 0 : if (payload->write) {
4399 0 : if (payload->mot)
4400 : action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
4401 : else
4402 0 : action = DP_AUX_REQ_ACTION_I2C_WRITE;
4403 : } else {
4404 0 : if (payload->mot)
4405 : action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
4406 : else
4407 0 : action = DP_AUX_REQ_ACTION_I2C_READ;
4408 : }
4409 : } else {
4410 0 : if (payload->write)
4411 : action = DP_AUX_REQ_ACTION_DPCD_WRITE;
4412 : else
4413 0 : action = DP_AUX_REQ_ACTION_DPCD_READ;
4414 : }
4415 :
4416 0 : cmd.dp_aux_access.aux_control.dpaux.action = action;
4417 :
4418 0 : if (payload->length && payload->write) {
4419 0 : memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
4420 0 : payload->data,
4421 : payload->length
4422 : );
4423 : }
4424 :
4425 0 : dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
4426 0 : dc_dmub_srv_cmd_execute(dmub_srv);
4427 0 : dc_dmub_srv_wait_idle(dmub_srv);
4428 :
4429 0 : return true;
4430 : }
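/*
 * Editor's note: a hedged sketch (not part of dc) that builds an aux_payload
 * for a native DPCD read, using only the payload fields the function above
 * consumes. The DPCD address is an arbitrary example; the reply arrives later
 * via the outbox path, not through this call.
 */
#if 0	/* illustrative only */
static bool dm_read_dpcd_async(struct dc *dc, uint32_t link_index, uint8_t *buf)
{
	struct aux_payload payload = {
		.i2c_over_aux = false,	/* maps to DP_AUX_REQ_ACTION_DPCD_READ */
		.write = false,
		.mot = false,
		.address = 0x0,		/* example DPCD address */
		.length = 16,		/* must be <= 16, see the ASSERT above */
		.data = buf,
	};

	return dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
}
#endif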
4431 :
4432 0 : uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
4433 : uint8_t dpia_port_index)
4434 : {
4435 0 : uint8_t index, link_index = 0xFF;
4436 :
4437 0 : for (index = 0; index < dc->link_count; index++) {
4438 : /* ddc_hw_inst has dpia port index for dpia links
4439 : * and ddc instance for legacy links
4440 : */
4441 0 : if (!dc->links[index]->ddc->ddc_pin) {
4442 0 : if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
4443 : link_index = index;
4444 : break;
4445 : }
4446 : }
4447 : }
4448 0 : ASSERT(link_index != 0xFF);
4449 0 : return link_index;
4450 : }
4451 :
4452 : /**
4453 : *****************************************************************************
4454 : * Function: dc_process_dmub_set_config_async
4455 : *
4456 : * @brief
4457 : * Submits set_config command to dmub via inbox message
4458 : *
4459 : * @param
4460 : * [in] dc: dc structure
4461 : * [in] link_index: link index
4462 :  * [in] payload: set_config command payload
4463 : * [out] notify: set_config immediate reply
4464 : *
4465 : * @return
4466 : * True if successful, False if failure
4467 : *****************************************************************************
4468 : */
4469 0 : bool dc_process_dmub_set_config_async(struct dc *dc,
4470 : uint32_t link_index,
4471 : struct set_config_cmd_payload *payload,
4472 : struct dmub_notification *notify)
4473 : {
4474 0 : union dmub_rb_cmd cmd = {0};
4475 0 : struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4476 0 : bool is_cmd_complete = true;
4477 :
4478 : /* prepare SET_CONFIG command */
4479 0 : cmd.set_config_access.header.type = DMUB_CMD__DPIA;
4480 0 : cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
4481 :
4482 0 : cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
4483 0 : cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
4484 0 : cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
4485 :
4486 0 : if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
4487 : /* command is not processed by dmub */
4488 0 : notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
4489 0 : return is_cmd_complete;
4490 : }
4491 :
4492 : /* command processed by dmub, if ret_status is 1, it is completed instantly */
4493 0 : if (cmd.set_config_access.header.ret_status == 1)
4494 0 : notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
4495 : else
4496 : /* cmd pending, will receive notification via outbox */
4497 : is_cmd_complete = false;
4498 :
4499 : return is_cmd_complete;
4500 : }
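/*
 * Editor's note: a sketch (not part of dc) of how a caller interprets the
 * return value: 'true' means notify->sc_status is valid immediately, 'false'
 * means the reply will arrive as an outbox notification. The caller name is
 * hypothetical.
 */
#if 0	/* illustrative only */
static void dm_send_set_config(struct dc *dc, uint32_t link_index,
		struct set_config_cmd_payload *payload)
{
	struct dmub_notification notify = {0};

	if (dc_process_dmub_set_config_async(dc, link_index, payload, &notify)) {
		/* completed instantly; inspect notify.sc_status here */
	} else {
		/* pending; wait for the SET_CONFIG outbox notification */
	}
}
#endif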
4501 :
4502 : /**
4503 : *****************************************************************************
4504 : * Function: dc_process_dmub_set_mst_slots
4505 : *
4506 : * @brief
4507 : * Submits mst slot allocation command to dmub via inbox message
4508 : *
4509 : * @param
4510 : * [in] dc: dc structure
4511 : * [in] link_index: link index
4512 : * [in] mst_alloc_slots: mst slots to be allotted
4513 : * [out] mst_slots_in_use: mst slots in use returned in failure case
4514 : *
4515 : * @return
4516 : * DC_OK if successful, DC_ERROR if failure
4517 : *****************************************************************************
4518 : */
4519 0 : enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
4520 : uint32_t link_index,
4521 : uint8_t mst_alloc_slots,
4522 : uint8_t *mst_slots_in_use)
4523 : {
4524 0 : union dmub_rb_cmd cmd = {0};
4525 0 : struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4526 :
4527 : /* prepare MST_ALLOC_SLOTS command */
4528 0 : cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
4529 0 : cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
4530 :
4531 0 : cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
4532 0 : cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
4533 :
4534 0 : if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
4535 : /* command is not processed by dmub */
4536 : return DC_ERROR_UNEXPECTED;
4537 :
4538 : /* command processed by dmub, if ret_status is 1 */
4539 0 : if (cmd.set_config_access.header.ret_status != 1)
4540 : /* command processing error */
4541 : return DC_ERROR_UNEXPECTED;
4542 :
4543 : /* command processed and we have a status of 2, mst not enabled in dpia */
4544 0 : if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
4545 : return DC_FAIL_UNSUPPORTED_1;
4546 :
4547 : /* previously configured mst alloc and used slots did not match */
4548 0 : if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
4549 0 : *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
4550 0 : return DC_NOT_SUPPORTED;
4551 : }
4552 :
4553 : return DC_OK;
4554 : }
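/*
 * Editor's note: a sketch (not part of dc) dispatching on the status codes
 * documented in the function above; the caller name is hypothetical.
 */
#if 0	/* illustrative only */
static void dm_alloc_mst_slots(const struct dc *dc, uint32_t link_index,
		uint8_t slots)
{
	uint8_t slots_in_use = 0;

	switch (dc_process_dmub_set_mst_slots(dc, link_index, slots,
			&slots_in_use)) {
	case DC_OK:			/* allocation accepted */
		break;
	case DC_FAIL_UNSUPPORTED_1:	/* MST not enabled on this dpia */
		break;
	case DC_NOT_SUPPORTED:		/* mismatch; slots_in_use holds usage */
		break;
	default:			/* DC_ERROR_UNEXPECTED: not processed */
		break;
	}
}
#endif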
4555 :
4556 : /**
4557 : * dc_disable_accelerated_mode - disable accelerated mode
4558 : * @dc: dc structure
4559 : */
4560 0 : void dc_disable_accelerated_mode(struct dc *dc)
4561 : {
4562 0 : bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
4563 0 : }
4564 :
4565 :
4566 : /**
4567 : *****************************************************************************
4568 : * dc_notify_vsync_int_state() - notifies vsync enable/disable state
4569 : * @dc: dc structure
4570 : * @stream: stream where vsync int state changed
4571 : * @enable: whether vsync is enabled or disabled
4572 : *
4573 :  * Called when vsync is enabled/disabled.
4574 :  * Will notify DMUB to start/stop ABM interrupts after a steady state is reached.
4575 : *
4576 : *****************************************************************************
4577 : */
4578 0 : void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
4579 : {
4580 : int i;
4581 : int edp_num;
4582 0 : struct pipe_ctx *pipe = NULL;
4583 0 : struct dc_link *link = stream->sink->link;
4584 : struct dc_link *edp_links[MAX_NUM_EDP];
4585 :
4586 :
4587 0 : if (link->psr_settings.psr_feature_enabled)
4588 0 : return;
4589 :
4590 :	/* find primary pipe associated with stream */
4591 0 : for (i = 0; i < MAX_PIPES; i++) {
4592 0 : pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4593 :
4594 0 : if (pipe->stream == stream && pipe->stream_res.tg)
4595 : break;
4596 : }
4597 :
4598 0 : if (i == MAX_PIPES) {
4599 0 : ASSERT(0);
4600 : return;
4601 : }
4602 :
4603 0 : get_edp_links(dc, edp_links, &edp_num);
4604 :
4605 : /* Determine panel inst */
4606 0 : for (i = 0; i < edp_num; i++) {
4607 0 : if (edp_links[i] == link)
4608 : break;
4609 : }
4610 :
4611 0 : if (i == edp_num) {
4612 : return;
4613 : }
4614 :
4615 0 : if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
4616 0 : pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
4617 : }
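/*
 * Editor's note: a minimal sketch (not part of dc) of the call site implied
 * by the kernel-doc above - the DM forwards its vblank interrupt toggle; the
 * hook name is hypothetical.
 */
#if 0	/* illustrative only */
static void dm_on_vblank_irq_toggle(struct dc *dc,
		struct dc_stream_state *stream, bool enabled)
{
	/* lets DMUB pause/resume ABM interrupts for the matching eDP panel */
	dc_notify_vsync_int_state(dc, stream, enabled);
}
#endif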
4618 : /*
4619 : * dc_extended_blank_supported: Decide whether extended blank is supported
4620 : *
4621 : * Extended blank is a freesync optimization feature to be enabled in the future.
4622 : * During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
4623 : *
4624 : * @param [in] dc: Current DC state
4625 : * @return: Indicate whether extended blank is supported (true or false)
4626 : */
4627 0 : bool dc_extended_blank_supported(struct dc *dc)
4628 : {
4629 0 : return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
4630 0 : && dc->caps.zstate_support && dc->caps.is_apu;
4631 : }